From be664bf6e035932982c273c77def432b17eacda4 Mon Sep 17 00:00:00 2001 From: hboeglen <60876763+hboeglen@users.noreply.github.com> Date: Thu, 24 Jun 2021 08:41:03 +0200 Subject: [PATCH 1/2] Version 3.7 --- .gitignore | 2 - CMakeLists.txt | 181 - LICENSE | 674 -- MANIFEST.md | 16 - README.md | 67 - apps/CMakeLists.txt | 21 - cmake/Modules/CMakeParseArgumentsCopy.cmake | 138 - cmake/Modules/FindCppUnit.cmake | 39 - cmake/Modules/FindGnuradioRuntime.cmake | 36 - cmake/Modules/GrMiscUtils.cmake | 521 - cmake/Modules/GrPlatform.cmake | 50 - cmake/Modules/GrPython.cmake | 237 - cmake/Modules/GrSwig.cmake | 247 - cmake/Modules/GrTest.cmake | 139 - cmake/Modules/UseSWIG.cmake | 304 - cmake/Modules/nordicConfig.cmake | 30 - cmake/cmake_uninstall.cmake.in | 32 - docs/CMakeLists.txt | 31 - docs/README.nordic | 11 - docs/doxygen/CMakeLists.txt | 48 - docs/doxygen/Doxyfile.in | 1922 ---- docs/doxygen/Doxyfile.swig_doc.in | 1890 ---- docs/doxygen/doxyxml/__init__.py | 79 - docs/doxygen/doxyxml/base.py | 216 - docs/doxygen/doxyxml/doxyindex.py | 234 - docs/doxygen/doxyxml/generated/__init__.py | 7 - docs/doxygen/doxyxml/generated/compound.py | 503 - .../doxyxml/generated/compoundsuper.py | 8342 ----------------- docs/doxygen/doxyxml/generated/index.py | 77 - docs/doxygen/doxyxml/generated/indexsuper.py | 523 -- docs/doxygen/doxyxml/text.py | 53 - docs/doxygen/other/group_defs.dox | 7 - docs/doxygen/other/main_page.dox | 10 - docs/doxygen/swig_doc.py | 252 - examples/microsoft_mouse_sniffer.py | 171 - examples/nordic_auto_ack.py | 170 - examples/nordic_channelized_receiver.py | 132 - examples/nordic_channelized_transmitter.py | 133 - examples/nordic_receiver.py | 134 - examples/nordic_sniffer_scanner.py | 142 - grc/CMakeLists.txt | 18 - grc/nordic_nordic_rx.xml | 38 - grc/nordic_nordic_tx.xml | 38 - include/nordic/CMakeLists.txt | 24 - include/nordic/api.h | 29 - include/nordic/nordic_rx.h | 63 - include/nordic/nordic_tx.h | 56 - lib/CMakeLists.txt | 76 - lib/bit_shifting_byte_vector.cc | 79 - lib/bit_shifting_byte_vector.h | 72 - lib/enhanced_shockburst_packet.cc | 178 - lib/enhanced_shockburst_packet.h | 102 - lib/nordic_rx_impl.cc | 152 - lib/nordic_rx_impl.h | 74 - lib/nordic_tx_impl.cc | 155 - lib/nordic_tx_impl.h | 63 - lib/nordictap.h | 36 - lib/qa_nordic.cc | 32 - lib/qa_nordic.h | 34 - lib/test_nordic.cc | 44 - python/CMakeLists.txt | 39 - python/__init__.py | 35 - python/build_utils.py | 218 - python/build_utils_codes.py | 48 - swig/CMakeLists.txt | 61 - swig/nordic_swig.i | 19 - wireshark/nordic_dissector.lua | 115 - 67 files changed, 19719 deletions(-) delete mode 100644 .gitignore delete mode 100644 CMakeLists.txt delete mode 100644 LICENSE delete mode 100644 MANIFEST.md delete mode 100644 README.md delete mode 100644 apps/CMakeLists.txt delete mode 100644 cmake/Modules/CMakeParseArgumentsCopy.cmake delete mode 100644 cmake/Modules/FindCppUnit.cmake delete mode 100644 cmake/Modules/FindGnuradioRuntime.cmake delete mode 100644 cmake/Modules/GrMiscUtils.cmake delete mode 100644 cmake/Modules/GrPlatform.cmake delete mode 100644 cmake/Modules/GrPython.cmake delete mode 100644 cmake/Modules/GrSwig.cmake delete mode 100644 cmake/Modules/GrTest.cmake delete mode 100644 cmake/Modules/UseSWIG.cmake delete mode 100644 cmake/Modules/nordicConfig.cmake delete mode 100644 cmake/cmake_uninstall.cmake.in delete mode 100644 docs/CMakeLists.txt delete mode 100644 docs/README.nordic delete mode 100644 docs/doxygen/CMakeLists.txt delete mode 100644 docs/doxygen/Doxyfile.in delete mode 100644 
docs/doxygen/Doxyfile.swig_doc.in delete mode 100644 docs/doxygen/doxyxml/__init__.py delete mode 100644 docs/doxygen/doxyxml/base.py delete mode 100644 docs/doxygen/doxyxml/doxyindex.py delete mode 100644 docs/doxygen/doxyxml/generated/__init__.py delete mode 100644 docs/doxygen/doxyxml/generated/compound.py delete mode 100644 docs/doxygen/doxyxml/generated/compoundsuper.py delete mode 100644 docs/doxygen/doxyxml/generated/index.py delete mode 100644 docs/doxygen/doxyxml/generated/indexsuper.py delete mode 100644 docs/doxygen/doxyxml/text.py delete mode 100644 docs/doxygen/other/group_defs.dox delete mode 100644 docs/doxygen/other/main_page.dox delete mode 100644 docs/doxygen/swig_doc.py delete mode 100755 examples/microsoft_mouse_sniffer.py delete mode 100755 examples/nordic_auto_ack.py delete mode 100755 examples/nordic_channelized_receiver.py delete mode 100755 examples/nordic_channelized_transmitter.py delete mode 100755 examples/nordic_receiver.py delete mode 100755 examples/nordic_sniffer_scanner.py delete mode 100644 grc/CMakeLists.txt delete mode 100644 grc/nordic_nordic_rx.xml delete mode 100644 grc/nordic_nordic_tx.xml delete mode 100644 include/nordic/CMakeLists.txt delete mode 100644 include/nordic/api.h delete mode 100644 include/nordic/nordic_rx.h delete mode 100644 include/nordic/nordic_tx.h delete mode 100644 lib/CMakeLists.txt delete mode 100644 lib/bit_shifting_byte_vector.cc delete mode 100644 lib/bit_shifting_byte_vector.h delete mode 100644 lib/enhanced_shockburst_packet.cc delete mode 100644 lib/enhanced_shockburst_packet.h delete mode 100644 lib/nordic_rx_impl.cc delete mode 100644 lib/nordic_rx_impl.h delete mode 100644 lib/nordic_tx_impl.cc delete mode 100644 lib/nordic_tx_impl.h delete mode 100644 lib/nordictap.h delete mode 100644 lib/qa_nordic.cc delete mode 100644 lib/qa_nordic.h delete mode 100644 lib/test_nordic.cc delete mode 100644 python/CMakeLists.txt delete mode 100644 python/__init__.py delete mode 100644 python/build_utils.py delete mode 100644 python/build_utils_codes.py delete mode 100644 swig/CMakeLists.txt delete mode 100644 swig/nordic_swig.i delete mode 100644 wireshark/nordic_dissector.lua diff --git a/.gitignore b/.gitignore deleted file mode 100644 index b92d662..0000000 --- a/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -build/ -*.pyc diff --git a/CMakeLists.txt b/CMakeLists.txt deleted file mode 100644 index 3874c97..0000000 --- a/CMakeLists.txt +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- - -######################################################################## -# Project setup -######################################################################## -cmake_minimum_required(VERSION 2.6) -project(gr-nordic CXX C) -enable_testing() - -#select the release build type by default to get optimization flags -if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE "Release") - message(STATUS "Build type not specified: defaulting to release.") -endif(NOT CMAKE_BUILD_TYPE) -set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "") - -#make sure our local CMake Modules path comes first -list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_SOURCE_DIR}/cmake/Modules) - -######################################################################## -# Compiler specific setup -######################################################################## -if(CMAKE_COMPILER_IS_GNUCXX AND NOT WIN32) - #http://gcc.gnu.org/wiki/Visibility - add_definitions(-fvisibility=hidden) -endif() - -######################################################################## -# Find boost -######################################################################## -if(UNIX AND EXISTS "/usr/lib64") - list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix -endif(UNIX AND EXISTS "/usr/lib64") -set(Boost_ADDITIONAL_VERSIONS - "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39" - "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44" - "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49" - "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54" - "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59" - "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64" - "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69" -) -find_package(Boost "1.35" COMPONENTS filesystem system) - -if(NOT Boost_FOUND) - message(FATAL_ERROR "Boost required to compile nordic") -endif() - -######################################################################## -# Install directories -######################################################################## -include(GrPlatform) #define LIB_SUFFIX -set(GR_RUNTIME_DIR bin) -set(GR_LIBRARY_DIR lib${LIB_SUFFIX}) -set(GR_INCLUDE_DIR include/nordic) -set(GR_DATA_DIR share) -set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME}) -set(GR_DOC_DIR ${GR_DATA_DIR}/doc) -set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME}) -set(GR_CONF_DIR etc) -set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d) -set(GR_LIBEXEC_DIR libexec) -set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME}) -set(GRC_BLOCKS_DIR ${GR_PKG_DATA_DIR}/grc/blocks) - -######################################################################## -# On Apple only, set install name and use rpath correctly, if not already set -######################################################################## -if(APPLE) - if(NOT CMAKE_INSTALL_NAME_DIR) - set(CMAKE_INSTALL_NAME_DIR - ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE - PATH "Library Install Name Destination Directory" FORCE) - endif(NOT CMAKE_INSTALL_NAME_DIR) - if(NOT CMAKE_INSTALL_RPATH) - set(CMAKE_INSTALL_RPATH - ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE - PATH "Library Install RPath" FORCE) - endif(NOT CMAKE_INSTALL_RPATH) - if(NOT CMAKE_BUILD_WITH_INSTALL_RPATH) - set(CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE - BOOL "Do Build Using Library Install RPath" FORCE) - endif(NOT CMAKE_BUILD_WITH_INSTALL_RPATH) 
-endif(APPLE) - -######################################################################## -# Find gnuradio build dependencies -######################################################################## -find_package(CppUnit) -find_package(Doxygen) - -# Search for GNU Radio and its components and versions. Add any -# components required to the list of GR_REQUIRED_COMPONENTS (in all -# caps such as FILTER or FFT) and change the version to the minimum -# API compatible version required. -set(GR_REQUIRED_COMPONENTS RUNTIME) -find_package(Gnuradio "3.7.2" REQUIRED) - -if(NOT CPPUNIT_FOUND) - message(FATAL_ERROR "CppUnit required to compile nordic") -endif() - -######################################################################## -# Setup doxygen option -######################################################################## -if(DOXYGEN_FOUND) - option(ENABLE_DOXYGEN "Build docs using Doxygen" ON) -else(DOXYGEN_FOUND) - option(ENABLE_DOXYGEN "Build docs using Doxygen" OFF) -endif(DOXYGEN_FOUND) - -######################################################################## -# Setup the include and linker paths -######################################################################## -include_directories( - ${CMAKE_SOURCE_DIR}/lib - ${CMAKE_SOURCE_DIR}/include - ${CMAKE_BINARY_DIR}/lib - ${CMAKE_BINARY_DIR}/include - ${Boost_INCLUDE_DIRS} - ${CPPUNIT_INCLUDE_DIRS} - ${GNURADIO_ALL_INCLUDE_DIRS} -) - -link_directories( - ${Boost_LIBRARY_DIRS} - ${CPPUNIT_LIBRARY_DIRS} - ${GNURADIO_RUNTIME_LIBRARY_DIRS} -) - -# Set component parameters -set(GR_NORDIC_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE INTERNAL "" FORCE) -set(GR_NORDIC_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/swig CACHE INTERNAL "" FORCE) - -######################################################################## -# Create uninstall target -######################################################################## -configure_file( - ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake -@ONLY) - -add_custom_target(uninstall - ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake -) - -######################################################################## -# Add subdirectories -######################################################################## -add_subdirectory(include/nordic) -add_subdirectory(lib) -add_subdirectory(swig) -add_subdirectory(python) -add_subdirectory(grc) -add_subdirectory(apps) -add_subdirectory(docs) - -######################################################################## -# Install cmake search helper for this library -######################################################################## -if(NOT CMAKE_MODULES_DIR) - set(CMAKE_MODULES_DIR lib${LIB_SUFFIX}/cmake) -endif(NOT CMAKE_MODULES_DIR) - -install(FILES cmake/Modules/nordicConfig.cmake - DESTINATION ${CMAKE_MODULES_DIR}/nordic -) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 9cecc1d..0000000 --- a/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. 
By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. 
- - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. 
This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. 
Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. 
-Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). 
- - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". 
- - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. 
- - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. 
- - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - {one line to give the program's name and a brief idea of what it does.} - Copyright (C) {year} {name of author} - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - {project} Copyright (C) {year} {fullname} - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. 
If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/MANIFEST.md b/MANIFEST.md deleted file mode 100644 index 49f9201..0000000 --- a/MANIFEST.md +++ /dev/null @@ -1,16 +0,0 @@ -title: The NORDIC OOT Module -brief: Short description of gr-nordic -tags: # Tags are arbitrary, but look at CGRAN what other authors are using - - sdr -author: - - Author Name -copyright_owner: - - Copyright Owner 1 -license: -#repo: # Put the URL of the repository here, or leave blank for default -#website: # If you have a separate project website, put it here -#icon: # Put a URL to a square image here that will be used as an icon on CGRAN ---- -A longer, multi-line description of gr-nordic. -You may use some *basic* Markdown here. -If left empty, it will try to find a README file instead. diff --git a/README.md b/README.md deleted file mode 100644 index 4afb6ab..0000000 --- a/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# gr-nordic - -GNU Radio module and Wireshark dissector for the Nordic Semiconductor nRF24L Enhanced Shockburst protocol. - -## external c++ classes - -### nordic_rx - -Receiver class which consumes a GFSK demodulated bitstream and reconstructs Enhanced Shockburst packets. PDUs are printed standard out and sent to Wireshark. - -### nordic_tx - -Transmitter class which consumes nordictap structs, generates Enhanced Shockburst packets, and produces a byte stream to be fed to a GFSK modulator. - -## python examples - -All python examples use the osmosdr_source/osmosdr_sink blocks, and are SDR agnostic. - -### nordic_receiver.py - -Single channel receiver. Listening on channel 4 (2404MHz) with a 2Mbps data rate, 5 byte address, and 2 byte CRC is invoked as follows: - -```./nordic_receiver.py --channel 4 --data_rate 2e6 --crc_length 2 --address_length 5 --samples_per_symbol 2 --gain 40``` - -### nordic_auto_ack.py - -Single channel receiver with auto-ACK. Listening (and ACKing) on channel 4 (2404MHz) with a 2Mbps data rate, 5 byte address, and 2 byte CRC is invoked as follows: - -```./nordic_auto_ack.py --channel 4 --data_rate 2e6 --crc_length 2 --address_length 5 --samples_per_symbol 2 --gain 40``` - -### nordic_sniffer_scanner.py - -Sweeping single channel receiver, which sweeps between channels 2-83 looking for Enhanced Shockburst packets. During receive activity, it camps on a given channel until idle. - -```./nordic_sniffer_scanner.py ``` - -### microsoft_mouse_sniffer.py - -Microsoft mouse/keyboard following receiver. When launched, this script will sweep between the 24 possible Microsoft wireless keyboard/mouse channels. When a device is found, it switches to that device's 4-channel group, sweeping between that set to follow the device. - -```./microsoft_mouse_sniffer.py ``` - -### nordic_channelized_receiver.py - -Channelized receiver example, which tunes to 2414MHz, and receives 2Mbps Enhanced Shockburst packets on channels 10, 14, and 18. - -```./nordic_channelized_receiver.py ``` - -### nordic_channelized_transmitter.py - -Channelized transmitter example, which tunes to 2414MHz, and transmits 2Mbps Enhanced Shockburst packets on channels 10, 14, and 18. - -```./nordic_channelized_transmitter.py ``` - -## wireshark dissector - -The wireshark dissector will display Enhanced Shockburst packets in Wireshark. The logic is very straightforward, and will be simple to extend to classify various device types. 
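The channel numbers in the example invocations above map directly onto 2.4 GHz carrier frequencies: the README quotes channel 4 as 2404 MHz, and the channelized examples center at 2414 MHz to cover channels 10, 14 and 18. A minimal sketch of that mapping, assuming the usual nRF24L convention of 1 MHz channel spacing starting at 2400 MHz (the helper names below are illustrative, not part of gr-nordic):

```python
# Sketch of the nRF24L channel-to-frequency mapping used by the examples.
# Assumes the standard nRF24L convention: f = 2400 MHz + channel * 1 MHz.

def channel_to_freq_hz(channel):
    """Carrier frequency in Hz for an nRF24L channel number."""
    return 2400e6 + channel * 1e6

def channel_offset_hz(channel, center_freq_hz):
    """Baseband offset of a channel relative to an SDR center frequency,
    as in the channelized receiver/transmitter examples."""
    return channel_to_freq_hz(channel) - center_freq_hz

if __name__ == "__main__":
    # Single-channel example: channel 4 -> 2404 MHz
    print(channel_to_freq_hz(4))                      # 2404000000.0
    # Channelized examples: 2414 MHz center, channels 10, 14 and 18
    for ch in (10, 14, 18):
        print(ch, channel_offset_hz(ch, 2414e6) / 1e6)  # -4.0, 0.0, +4.0 MHz
```

Under that assumption, channels 10, 14 and 18 sit at -4 MHz, 0 MHz and +4 MHz from the 2414 MHz center, which is why a single wideband capture can feed all three channelized paths.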
- -### wireshark/nordic_dissector.lua - -```wireshark -X lua_script:wireshark/nordic_dissector.lua -i lo -k -f udp ``` - -## nRF24LU1+ research firmware - -Corresponding research firmware for the nRF24LU1+ chips (including Logitech Unifying dongles) is available [here](https://github.com/BastilleResearch/nrf-research-firmware/). - -Documentation on the packet formats covered by the MouseJack and KeySniffer vulnerability sets is available [here](https://github.com/BastilleResearch/mousejack/tree/master/doc/pdf). diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt deleted file mode 100644 index d63bda1..0000000 --- a/apps/CMakeLists.txt +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -include(GrPython) - -GR_PYTHON_INSTALL( - PROGRAMS - DESTINATION bin -) diff --git a/cmake/Modules/CMakeParseArgumentsCopy.cmake b/cmake/Modules/CMakeParseArgumentsCopy.cmake deleted file mode 100644 index 7ce4c49..0000000 --- a/cmake/Modules/CMakeParseArgumentsCopy.cmake +++ /dev/null @@ -1,138 +0,0 @@ -# CMAKE_PARSE_ARGUMENTS( args...) -# -# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for -# parsing the arguments given to that macro or function. -# It processes the arguments and defines a set of variables which hold the -# values of the respective options. -# -# The argument contains all options for the respective macro, -# i.e. keywords which can be used when calling the macro without any value -# following, like e.g. the OPTIONAL keyword of the install() command. -# -# The argument contains all keywords for this macro -# which are followed by one value, like e.g. DESTINATION keyword of the -# install() command. -# -# The argument contains all keywords for this macro -# which can be followed by more than one value, like e.g. the TARGETS or -# FILES keywords of the install() command. -# -# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the -# keywords listed in , and -# a variable composed of the given -# followed by "_" and the name of the respective keyword. -# These variables will then hold the respective value from the argument list. -# For the keywords this will be TRUE or FALSE. -# -# All remaining arguments are collected in a variable -# _UNPARSED_ARGUMENTS, this can be checked afterwards to see whether -# your macro was called with unrecognized parameters. -# -# As an example here a my_install() macro, which takes similar arguments as the -# real install() command: -# -# function(MY_INSTALL) -# set(options OPTIONAL FAST) -# set(oneValueArgs DESTINATION RENAME) -# set(multiValueArgs TARGETS CONFIGURATIONS) -# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) -# ... 
-# -# Assume my_install() has been called like this: -# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub) -# -# After the cmake_parse_arguments() call the macro will have set the following -# variables: -# MY_INSTALL_OPTIONAL = TRUE -# MY_INSTALL_FAST = FALSE (this option was not used when calling my_install() -# MY_INSTALL_DESTINATION = "bin" -# MY_INSTALL_RENAME = "" (was not used) -# MY_INSTALL_TARGETS = "foo;bar" -# MY_INSTALL_CONFIGURATIONS = "" (was not used) -# MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL" -# -# You can the continue and process these variables. -# -# Keywords terminate lists of values, e.g. if directly after a one_value_keyword -# another recognized keyword follows, this is interpreted as the beginning of -# the new option. -# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would result in -# MY_INSTALL_DESTINATION set to "OPTIONAL", but MY_INSTALL_DESTINATION would -# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefor. - -#============================================================================= -# Copyright 2010 Alexander Neundorf -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright.txt for details. -# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distribute this file outside of CMake, substitute the full -# License text for the above reference.) - - -if(__CMAKE_PARSE_ARGUMENTS_INCLUDED) - return() -endif() -set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE) - - -function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames) - # first set all result variables to empty/FALSE - foreach(arg_name ${_singleArgNames} ${_multiArgNames}) - set(${prefix}_${arg_name}) - endforeach(arg_name) - - foreach(option ${_optionNames}) - set(${prefix}_${option} FALSE) - endforeach(option) - - set(${prefix}_UNPARSED_ARGUMENTS) - - set(insideValues FALSE) - set(currentArgName) - - # now iterate over all arguments and fill the result variables - foreach(currentArg ${ARGN}) - list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword - list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword - list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... 
then this marks the end of the arguments belonging to this keyword - - if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1) - if(insideValues) - if("${insideValues}" STREQUAL "SINGLE") - set(${prefix}_${currentArgName} ${currentArg}) - set(insideValues FALSE) - elseif("${insideValues}" STREQUAL "MULTI") - list(APPEND ${prefix}_${currentArgName} ${currentArg}) - endif() - else(insideValues) - list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg}) - endif(insideValues) - else() - if(NOT ${optionIndex} EQUAL -1) - set(${prefix}_${currentArg} TRUE) - set(insideValues FALSE) - elseif(NOT ${singleArgIndex} EQUAL -1) - set(currentArgName ${currentArg}) - set(${prefix}_${currentArgName}) - set(insideValues "SINGLE") - elseif(NOT ${multiArgIndex} EQUAL -1) - set(currentArgName ${currentArg}) - set(${prefix}_${currentArgName}) - set(insideValues "MULTI") - endif() - endif() - - endforeach(currentArg) - - # propagate the result variables to the caller: - foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames}) - set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE) - endforeach(arg_name) - set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE) - -endfunction(CMAKE_PARSE_ARGUMENTS _options _singleArgs _multiArgs) diff --git a/cmake/Modules/FindCppUnit.cmake b/cmake/Modules/FindCppUnit.cmake deleted file mode 100644 index f93ade3..0000000 --- a/cmake/Modules/FindCppUnit.cmake +++ /dev/null @@ -1,39 +0,0 @@ -# http://www.cmake.org/pipermail/cmake/2006-October/011446.html -# Modified to use pkg config and use standard var names - -# -# Find the CppUnit includes and library -# -# This module defines -# CPPUNIT_INCLUDE_DIR, where to find tiff.h, etc. -# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit. -# CPPUNIT_FOUND, If false, do not try to use CppUnit. 
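As a rough usage sketch of the variables this find module documents above (the target name and source file are placeholders, not part of gr-nordic), a consuming CMakeLists.txt might look like this:

```cmake
# Hypothetical consumer of FindCppUnit.cmake; qa_example is a placeholder.
list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules)
find_package(CppUnit)

if(CPPUNIT_FOUND)
  include_directories(${CPPUNIT_INCLUDE_DIRS})
  add_executable(qa_example qa_example.cc)
  target_link_libraries(qa_example ${CPPUNIT_LIBRARIES})
endif()
```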
- -INCLUDE(FindPkgConfig) -PKG_CHECK_MODULES(PC_CPPUNIT "cppunit") - -FIND_PATH(CPPUNIT_INCLUDE_DIRS - NAMES cppunit/TestCase.h - HINTS ${PC_CPPUNIT_INCLUDE_DIR} - ${CMAKE_INSTALL_PREFIX}/include - PATHS - /usr/local/include - /usr/include -) - -FIND_LIBRARY(CPPUNIT_LIBRARIES - NAMES cppunit - HINTS ${PC_CPPUNIT_LIBDIR} - ${CMAKE_INSTALL_PREFIX}/lib - ${CMAKE_INSTALL_PREFIX}/lib64 - PATHS - ${CPPUNIT_INCLUDE_DIRS}/../lib - /usr/local/lib - /usr/lib -) - -LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS}) - -INCLUDE(FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) -MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) diff --git a/cmake/Modules/FindGnuradioRuntime.cmake b/cmake/Modules/FindGnuradioRuntime.cmake deleted file mode 100644 index afed684..0000000 --- a/cmake/Modules/FindGnuradioRuntime.cmake +++ /dev/null @@ -1,36 +0,0 @@ -INCLUDE(FindPkgConfig) -PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime) - -if(PC_GNURADIO_RUNTIME_FOUND) - # look for include files - FIND_PATH( - GNURADIO_RUNTIME_INCLUDE_DIRS - NAMES gnuradio/top_block.h - HINTS $ENV{GNURADIO_RUNTIME_DIR}/include - ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS} - ${CMAKE_INSTALL_PREFIX}/include - PATHS /usr/local/include - /usr/include - ) - - # look for libs - FIND_LIBRARY( - GNURADIO_RUNTIME_LIBRARIES - NAMES gnuradio-runtime - HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib - ${PC_GNURADIO_RUNTIME_LIBDIR} - ${CMAKE_INSTALL_PREFIX}/lib/ - ${CMAKE_INSTALL_PREFIX}/lib64/ - PATHS /usr/local/lib - /usr/local/lib64 - /usr/lib - /usr/lib64 - ) - - set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND}) -endif(PC_GNURADIO_RUNTIME_FOUND) - -INCLUDE(FindPackageHandleStandardArgs) -# do not check GNURADIO_RUNTIME_INCLUDE_DIRS, is not set when default include path us used. -FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES) -MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS) diff --git a/cmake/Modules/GrMiscUtils.cmake b/cmake/Modules/GrMiscUtils.cmake deleted file mode 100644 index 7b8b7ed..0000000 --- a/cmake/Modules/GrMiscUtils.cmake +++ /dev/null @@ -1,521 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE) - return() -endif() -set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE) - -######################################################################## -# Set global variable macro. -# Used for subdirectories to export settings. -# Example: include and library paths. -######################################################################## -function(GR_SET_GLOBAL var) - set(${var} ${ARGN} CACHE INTERNAL "" FORCE) -endfunction(GR_SET_GLOBAL) - -######################################################################## -# Set the pre-processor definition if the condition is true. 
-# - def the pre-processor definition to set and condition name -######################################################################## -function(GR_ADD_COND_DEF def) - if(${def}) - add_definitions(-D${def}) - endif(${def}) -endfunction(GR_ADD_COND_DEF) - -######################################################################## -# Check for a header and conditionally set a compile define. -# - hdr the relative path to the header file -# - def the pre-processor definition to set -######################################################################## -function(GR_CHECK_HDR_N_DEF hdr def) - include(CheckIncludeFileCXX) - CHECK_INCLUDE_FILE_CXX(${hdr} ${def}) - GR_ADD_COND_DEF(${def}) -endfunction(GR_CHECK_HDR_N_DEF) - -######################################################################## -# Include subdirectory macro. -# Sets the CMake directory variables, -# includes the subdirectory CMakeLists.txt, -# resets the CMake directory variables. -# -# This macro includes subdirectories rather than adding them -# so that the subdirectory can affect variables in the level above. -# This provides a work-around for the lack of convenience libraries. -# This way a subdirectory can append to the list of library sources. -######################################################################## -macro(GR_INCLUDE_SUBDIRECTORY subdir) - #insert the current directories on the front of the list - list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR}) - list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR}) - - #set the current directories to the names of the subdirs - set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}) - set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir}) - - #include the subdirectory CMakeLists to run it - file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) - include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt) - - #reset the value of the current directories - list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR) - list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR) - - #pop the subdir names of the front of the list - list(REMOVE_AT _cmake_source_dirs 0) - list(REMOVE_AT _cmake_binary_dirs 0) -endmacro(GR_INCLUDE_SUBDIRECTORY) - -######################################################################## -# Check if a compiler flag works and conditionally set a compile define. -# - flag the compiler flag to check for -# - have the variable to set with result -######################################################################## -macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have) - include(CheckCXXCompilerFlag) - CHECK_CXX_COMPILER_FLAG(${flag} ${have}) - if(${have}) - if(${CMAKE_VERSION} VERSION_GREATER "2.8.4") - STRING(FIND "${CMAKE_CXX_FLAGS}" "${flag}" flag_dup) - if(${flag_dup} EQUAL -1) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") - endif(${flag_dup} EQUAL -1) - endif(${CMAKE_VERSION} VERSION_GREATER "2.8.4") - endif(${have}) -endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE) - -######################################################################## -# Generates the .la libtool file -# This appears to generate libtool files that cannot be used by auto*. -# Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest]) -# Notice: there is not COMPONENT option, these will not get distributed. 
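A minimal call site for the GR_LIBTOOL macro documented above, mirroring how GR_LIBRARY_FOO later in this file invokes it (illustrative only; .la generation stays off unless GENERATE_LIBTOOL is enabled):

```cmake
# Hedged sketch: enable with -DGENERATE_LIBTOOL=ON at configure time.
GR_LIBTOOL(TARGET gnuradio-nordic DESTINATION ${GR_LIBRARY_DIR})
```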
-######################################################################## -function(GR_LIBTOOL) - if(NOT DEFINED GENERATE_LIBTOOL) - set(GENERATE_LIBTOOL OFF) #disabled by default - endif() - - if(GENERATE_LIBTOOL) - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN}) - - find_program(LIBTOOL libtool) - if(LIBTOOL) - include(CMakeMacroLibtoolFile) - CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION}) - endif(LIBTOOL) - endif(GENERATE_LIBTOOL) - -endfunction(GR_LIBTOOL) - -######################################################################## -# Do standard things to the library target -# - set target properties -# - make install rules -# Also handle gnuradio custom naming conventions w/ extras mode. -######################################################################## -function(GR_LIBRARY_FOO target) - #parse the arguments for component names - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN}) - - #set additional target properties - set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER}) - - #install the generated files like so... - install(TARGETS ${target} - LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file - ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file - RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file - ) - - #extras mode enabled automatically on linux - if(NOT DEFINED LIBRARY_EXTRAS) - set(LIBRARY_EXTRAS ${LINUX}) - endif() - - #special extras mode to enable alternative naming conventions - if(LIBRARY_EXTRAS) - - #create .la file before changing props - GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR}) - - #give the library a special name with ultra-zero soversion - set_target_properties(${target} PROPERTIES OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0") - set(target_name lib${target}-${LIBVER}.so.0.0.0) - - #custom command to generate symlinks - add_custom_command( - TARGET ${target} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so - COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 - COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install - ) - - #and install the extra symlinks - install( - FILES - ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so - ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 - DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} - ) - - endif(LIBRARY_EXTRAS) -endfunction(GR_LIBRARY_FOO) - -######################################################################## -# Create a dummy custom command that depends on other targets. -# Usage: -# GR_GEN_TARGET_DEPS(unique_name target_deps ...) -# ADD_CUSTOM_COMMAND( ${target_deps}) -# -# Custom command cant depend on targets, but can depend on executables, -# and executables can depend on targets. 
So this is the process: -######################################################################## -function(GR_GEN_TARGET_DEPS name var) - file( - WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in - "int main(void){return 0;}\n" - ) - execute_process( - COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in - ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp - ) - add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp) - if(ARGN) - add_dependencies(${name} ${ARGN}) - endif(ARGN) - - if(CMAKE_CROSSCOMPILING) - set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross - else() - set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE) - endif() -endfunction(GR_GEN_TARGET_DEPS) - -######################################################################## -# Control use of gr_logger -# Usage: -# GR_LOGGING() -# -# Will set ENABLE_GR_LOG to 1 by default. -# Can manually set with -DENABLE_GR_LOG=0|1 -######################################################################## -function(GR_LOGGING) - find_package(Log4cpp) - - OPTION(ENABLE_GR_LOG "Use gr_logger" ON) - if(ENABLE_GR_LOG) - # If gr_logger is enabled, make it usable - add_definitions( -DENABLE_GR_LOG ) - - # also test LOG4CPP; if we have it, use this version of the logger - # otherwise, default to the stdout/stderr model. - if(LOG4CPP_FOUND) - SET(HAVE_LOG4CPP True CACHE INTERNAL "" FORCE) - add_definitions( -DHAVE_LOG4CPP ) - else(not LOG4CPP_FOUND) - SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE) - SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE) - endif(LOG4CPP_FOUND) - - SET(ENABLE_GR_LOG ${ENABLE_GR_LOG} CACHE INTERNAL "" FORCE) - - else(ENABLE_GR_LOG) - SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE) - SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE) - endif(ENABLE_GR_LOG) - - message(STATUS "ENABLE_GR_LOG set to ${ENABLE_GR_LOG}.") - message(STATUS "HAVE_LOG4CPP set to ${HAVE_LOG4CPP}.") - message(STATUS "LOG4CPP_LIBRARIES set to ${LOG4CPP_LIBRARIES}.") - -endfunction(GR_LOGGING) - -######################################################################## -# Run GRCC to compile .grc files into .py files. -# -# Usage: GRCC(filename, directory) -# - filenames: List of file name of .grc file -# - directory: directory of built .py file - usually in -# ${CMAKE_CURRENT_BINARY_DIR} -# - Sets PYFILES: output converted GRC file names to Python files. -######################################################################## -function(GRCC) - # Extract directory from list of args, remove it for the list of filenames. - list(GET ARGV -1 directory) - list(REMOVE_AT ARGV -1) - set(filenames ${ARGV}) - file(MAKE_DIRECTORY ${directory}) - - SET(GRCC_COMMAND ${CMAKE_SOURCE_DIR}/gr-utils/python/grcc) - - # GRCC uses some stuff in grc and gnuradio-runtime, so we force - # the known paths here - list(APPEND PYTHONPATHS - ${CMAKE_SOURCE_DIR} - ${CMAKE_SOURCE_DIR}/gnuradio-runtime/python - ${CMAKE_SOURCE_DIR}/gnuradio-runtime/lib/swig - ${CMAKE_BINARY_DIR}/gnuradio-runtime/lib/swig - ) - - if(WIN32) - #SWIG generates the python library files into a subdirectory. - #Therefore, we must append this subdirectory into PYTHONPATH. 
- #Only do this for the python directories matching the following: - foreach(pydir ${PYTHONPATHS}) - get_filename_component(name ${pydir} NAME) - if(name MATCHES "^(swig|lib|src)$") - list(APPEND PYTHONPATHS ${pydir}/${CMAKE_BUILD_TYPE}) - endif() - endforeach(pydir) - endif(WIN32) - - file(TO_NATIVE_PATH "${PYTHONPATHS}" pypath) - - if(UNIX) - list(APPEND pypath "$PYTHONPATH") - string(REPLACE ";" ":" pypath "${pypath}") - set(ENV{PYTHONPATH} ${pypath}) - endif(UNIX) - - if(WIN32) - list(APPEND pypath "%PYTHONPATH%") - string(REPLACE ";" "\\;" pypath "${pypath}") - #list(APPEND environs "PYTHONPATH=${pypath}") - set(ENV{PYTHONPATH} ${pypath}) - endif(WIN32) - - foreach(f ${filenames}) - execute_process( - COMMAND ${GRCC_COMMAND} -d ${directory} ${f} - ) - string(REPLACE ".grc" ".py" pyfile "${f}") - string(REPLACE "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" pyfile "${pyfile}") - list(APPEND pyfiles ${pyfile}) - endforeach(f) - - set(PYFILES ${pyfiles} PARENT_SCOPE) -endfunction(GRCC) - -######################################################################## -# Check if HAVE_PTHREAD_SETSCHEDPARAM and HAVE_SCHED_SETSCHEDULER -# should be defined -######################################################################## -macro(GR_CHECK_LINUX_SCHED_AVAIL) -set(CMAKE_REQUIRED_LIBRARIES -lpthread) - CHECK_CXX_SOURCE_COMPILES(" - #include - int main(){ - pthread_t pthread; - pthread_setschedparam(pthread, 0, 0); - return 0; - } " HAVE_PTHREAD_SETSCHEDPARAM - ) - GR_ADD_COND_DEF(HAVE_PTHREAD_SETSCHEDPARAM) - - CHECK_CXX_SOURCE_COMPILES(" - #include - int main(){ - pid_t pid; - sched_setscheduler(pid, 0, 0); - return 0; - } " HAVE_SCHED_SETSCHEDULER - ) - GR_ADD_COND_DEF(HAVE_SCHED_SETSCHEDULER) -endmacro(GR_CHECK_LINUX_SCHED_AVAIL) - -######################################################################## -# Macros to generate source and header files from template -######################################################################## -macro(GR_EXPAND_X_H component root) - - include(GrPython) - - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py -"#!${PYTHON_EXECUTABLE} - -import sys, os, re -sys.path.append('${GR_RUNTIME_PYTHONPATH}') -os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' -os.chdir('${CMAKE_CURRENT_BINARY_DIR}') - -if __name__ == '__main__': - import build_utils - root, inp = sys.argv[1:3] - for sig in sys.argv[3:]: - name = re.sub ('X+', sig, root) - d = build_utils.standard_dict2(name, sig, '${component}') - build_utils.expand_template(d, inp) -") - - #make a list of all the generated headers - unset(expanded_files_h) - foreach(sig ${ARGN}) - string(REGEX REPLACE "X+" ${sig} name ${root}) - list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h) - endforeach(sig) - unset(name) - - #create a command to generate the headers - add_custom_command( - OUTPUT ${expanded_files_h} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}.h.t ${ARGN} - ) - - #install rules for the generated headers - list(APPEND generated_includes ${expanded_files_h}) - -endmacro(GR_EXPAND_X_H) - -macro(GR_EXPAND_X_CC_H component root) - - include(GrPython) - - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py -"#!${PYTHON_EXECUTABLE} - -import sys, os, re -sys.path.append('${GR_RUNTIME_PYTHONPATH}') -os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' -os.chdir('${CMAKE_CURRENT_BINARY_DIR}') - -if __name__ == '__main__': - import build_utils - root, 
inp = sys.argv[1:3] - for sig in sys.argv[3:]: - name = re.sub ('X+', sig, root) - d = build_utils.standard_impl_dict2(name, sig, '${component}') - build_utils.expand_template(d, inp) -") - - #make a list of all the generated files - unset(expanded_files_cc) - unset(expanded_files_h) - foreach(sig ${ARGN}) - string(REGEX REPLACE "X+" ${sig} name ${root}) - list(APPEND expanded_files_cc ${CMAKE_CURRENT_BINARY_DIR}/${name}.cc) - list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h) - endforeach(sig) - unset(name) - - #create a command to generate the source files - add_custom_command( - OUTPUT ${expanded_files_cc} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.cc.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}.cc.t ${ARGN} - ) - - #create a command to generate the header files - add_custom_command( - OUTPUT ${expanded_files_h} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}.h.t ${ARGN} - ) - - #make source files depends on headers to force generation - set_source_files_properties(${expanded_files_cc} - PROPERTIES OBJECT_DEPENDS "${expanded_files_h}" - ) - - #install rules for the generated files - list(APPEND generated_sources ${expanded_files_cc}) - list(APPEND generated_headers ${expanded_files_h}) - -endmacro(GR_EXPAND_X_CC_H) - -macro(GR_EXPAND_X_CC_H_IMPL component root) - - include(GrPython) - - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py -"#!${PYTHON_EXECUTABLE} - -import sys, os, re -sys.path.append('${GR_RUNTIME_PYTHONPATH}') -os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' -os.chdir('${CMAKE_CURRENT_BINARY_DIR}') - -if __name__ == '__main__': - import build_utils - root, inp = sys.argv[1:3] - for sig in sys.argv[3:]: - name = re.sub ('X+', sig, root) - d = build_utils.standard_dict(name, sig, '${component}') - build_utils.expand_template(d, inp, '_impl') -") - - #make a list of all the generated files - unset(expanded_files_cc_impl) - unset(expanded_files_h_impl) - unset(expanded_files_h) - foreach(sig ${ARGN}) - string(REGEX REPLACE "X+" ${sig} name ${root}) - list(APPEND expanded_files_cc_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.cc) - list(APPEND expanded_files_h_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.h) - list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/../include/gnuradio/${component}/${name}.h) - endforeach(sig) - unset(name) - - #create a command to generate the _impl.cc files - add_custom_command( - OUTPUT ${expanded_files_cc_impl} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.cc.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}_impl.cc.t ${ARGN} - ) - - #create a command to generate the _impl.h files - add_custom_command( - OUTPUT ${expanded_files_h_impl} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.h.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}_impl.h.t ${ARGN} - ) - - #make _impl.cc source files depend on _impl.h to force generation - set_source_files_properties(${expanded_files_cc_impl} - PROPERTIES OBJECT_DEPENDS "${expanded_files_h_impl}" - ) - - #make _impl.h source files depend on headers to force generation - set_source_files_properties(${expanded_files_h_impl} - PROPERTIES OBJECT_DEPENDS "${expanded_files_h}" - ) - - #install rules for the generated files - list(APPEND 
generated_sources ${expanded_files_cc_impl}) - list(APPEND generated_headers ${expanded_files_h_impl}) - -endmacro(GR_EXPAND_X_CC_H_IMPL) diff --git a/cmake/Modules/GrPlatform.cmake b/cmake/Modules/GrPlatform.cmake deleted file mode 100644 index 1139047..0000000 --- a/cmake/Modules/GrPlatform.cmake +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE) - return() -endif() -set(__INCLUDED_GR_PLATFORM_CMAKE TRUE) - -######################################################################## -# Setup additional defines for OS types -######################################################################## -if(CMAKE_SYSTEM_NAME STREQUAL "Linux") - set(LINUX TRUE) -endif() - -if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/debian_version") - set(DEBIAN TRUE) -endif() - -if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/redhat-release") - set(REDHAT TRUE) -endif() - -if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/slackware-version") - set(SLACKWARE TRUE) -endif() - -######################################################################## -# when the library suffix should be 64 (applies to redhat linux family) -######################################################################## -if (REDHAT OR SLACKWARE) - set(LIB64_CONVENTION TRUE) -endif() - -if(NOT DEFINED LIB_SUFFIX AND LIB64_CONVENTION AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$") - set(LIB_SUFFIX 64) -endif() -set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix") diff --git a/cmake/Modules/GrPython.cmake b/cmake/Modules/GrPython.cmake deleted file mode 100644 index ca6c34e..0000000 --- a/cmake/Modules/GrPython.cmake +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -if(DEFINED __INCLUDED_GR_PYTHON_CMAKE) - return() -endif() -set(__INCLUDED_GR_PYTHON_CMAKE TRUE) - -######################################################################## -# Setup the python interpreter: -# This allows the user to specify a specific interpreter, -# or finds the interpreter via the built-in cmake module. 
-######################################################################## -#this allows the user to override PYTHON_EXECUTABLE -if(PYTHON_EXECUTABLE) - - set(PYTHONINTERP_FOUND TRUE) - -#otherwise if not set, try to automatically find it -else(PYTHON_EXECUTABLE) - - #use the built-in find script - find_package(PythonInterp 2) - - #and if that fails use the find program routine - if(NOT PYTHONINTERP_FOUND) - find_program(PYTHON_EXECUTABLE NAMES python python2 python2.7 python2.6 python2.5) - if(PYTHON_EXECUTABLE) - set(PYTHONINTERP_FOUND TRUE) - endif(PYTHON_EXECUTABLE) - endif(NOT PYTHONINTERP_FOUND) - -endif(PYTHON_EXECUTABLE) - -if (CMAKE_CROSSCOMPILING) - set(QA_PYTHON_EXECUTABLE "/usr/bin/python") -else (CMAKE_CROSSCOMPILING) - set(QA_PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE}) -endif(CMAKE_CROSSCOMPILING) - -#make the path to the executable appear in the cmake gui -set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter") -set(QA_PYTHON_EXECUTABLE ${QA_PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter for QA tests") - -#make sure we can use -B with python (introduced in 2.6) -if(PYTHON_EXECUTABLE) - execute_process( - COMMAND ${PYTHON_EXECUTABLE} -B -c "" - OUTPUT_QUIET ERROR_QUIET - RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT - ) - if(PYTHON_HAS_DASH_B_RESULT EQUAL 0) - set(PYTHON_DASH_B "-B") - endif() -endif(PYTHON_EXECUTABLE) - -######################################################################## -# Check for the existence of a python module: -# - desc a string description of the check -# - mod the name of the module to import -# - cmd an additional command to run -# - have the result variable to set -######################################################################## -macro(GR_PYTHON_CHECK_MODULE desc mod cmd have) - message(STATUS "") - message(STATUS "Python checking for ${desc}") - execute_process( - COMMAND ${PYTHON_EXECUTABLE} -c " -######################################### -try: - import ${mod} - assert ${cmd} -except ImportError, AssertionError: exit(-1) -except: pass -#########################################" - RESULT_VARIABLE ${have} - ) - if(${have} EQUAL 0) - message(STATUS "Python checking for ${desc} - found") - set(${have} TRUE) - else(${have} EQUAL 0) - message(STATUS "Python checking for ${desc} - not found") - set(${have} FALSE) - endif(${have} EQUAL 0) -endmacro(GR_PYTHON_CHECK_MODULE) - -######################################################################## -# Sets the python installation directory GR_PYTHON_DIR -######################################################################## -if(NOT DEFINED GR_PYTHON_DIR) -execute_process(COMMAND ${PYTHON_EXECUTABLE} -c " -from distutils import sysconfig -print sysconfig.get_python_lib(plat_specific=True, prefix='') -" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE -) -endif() -file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR) - -######################################################################## -# Create an always-built target with a unique name -# Usage: GR_UNIQUE_TARGET( ) -######################################################################## -function(GR_UNIQUE_TARGET desc) - file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) - execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib -unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] -print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))" - OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE) - add_custom_target(${_target} ALL DEPENDS ${ARGN}) 
-endfunction(GR_UNIQUE_TARGET) - -######################################################################## -# Install python sources (also builds and installs byte-compiled python) -######################################################################## -function(GR_PYTHON_INSTALL) - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN}) - - #################################################################### - if(GR_PYTHON_INSTALL_FILES) - #################################################################### - install(${ARGN}) #installs regular python files - - #create a list of all generated files - unset(pysrcfiles) - unset(pycfiles) - unset(pyofiles) - foreach(pyfile ${GR_PYTHON_INSTALL_FILES}) - get_filename_component(pyfile ${pyfile} ABSOLUTE) - list(APPEND pysrcfiles ${pyfile}) - - #determine if this file is in the source or binary directory - file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile}) - string(LENGTH "${source_rel_path}" source_rel_path_len) - file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile}) - string(LENGTH "${binary_rel_path}" binary_rel_path_len) - - #and set the generated path appropriately - if(${source_rel_path_len} GREATER ${binary_rel_path_len}) - set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path}) - else() - set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path}) - endif() - list(APPEND pycfiles ${pygenfile}c) - list(APPEND pyofiles ${pygenfile}o) - - #ensure generation path exists - get_filename_component(pygen_path ${pygenfile} PATH) - file(MAKE_DIRECTORY ${pygen_path}) - - endforeach(pyfile) - - #the command to generate the pyc files - add_custom_command( - DEPENDS ${pysrcfiles} OUTPUT ${pycfiles} - COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles} - ) - - #the command to generate the pyo files - add_custom_command( - DEPENDS ${pysrcfiles} OUTPUT ${pyofiles} - COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles} - ) - - #create install rule and add generated files to target list - set(python_install_gen_targets ${pycfiles} ${pyofiles}) - install(FILES ${python_install_gen_targets} - DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} - COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} - ) - - #################################################################### - elseif(GR_PYTHON_INSTALL_PROGRAMS) - #################################################################### - file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native) - - if (CMAKE_CROSSCOMPILING) - set(pyexe_native "/usr/bin/env python") - endif() - - foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS}) - get_filename_component(pyfile_name ${pyfile} NAME) - get_filename_component(pyfile ${pyfile} ABSOLUTE) - string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe") - list(APPEND python_install_gen_targets ${pyexefile}) - - get_filename_component(pyexefile_path ${pyexefile} PATH) - file(MAKE_DIRECTORY ${pyexefile_path}) - - add_custom_command( - OUTPUT ${pyexefile} DEPENDS ${pyfile} - COMMAND ${PYTHON_EXECUTABLE} -c - "import re; R=re.compile('^\#!.*$\\n',flags=re.MULTILINE); open('${pyexefile}','w').write('\#!${pyexe_native}\\n'+R.sub('',open('${pyfile}','r').read()))" - COMMENT "Shebangin ${pyfile_name}" - VERBATIM - ) - - #on windows, python files need an extension to execute - get_filename_component(pyfile_ext ${pyfile} EXT) - if(WIN32 AND NOT pyfile_ext) - 
set(pyfile_name "${pyfile_name}.py") - endif() - - install(PROGRAMS ${pyexefile} RENAME ${pyfile_name} - DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} - COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} - ) - endforeach(pyfile) - - endif() - - GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets}) - -endfunction(GR_PYTHON_INSTALL) - -######################################################################## -# Write the python helper script that generates byte code files -######################################################################## -file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py " -import sys, py_compile -files = sys.argv[1:] -srcs, gens = files[:len(files)/2], files[len(files)/2:] -for src, gen in zip(srcs, gens): - py_compile.compile(file=src, cfile=gen, doraise=True) -") diff --git a/cmake/Modules/GrSwig.cmake b/cmake/Modules/GrSwig.cmake deleted file mode 100644 index 59720ca..0000000 --- a/cmake/Modules/GrSwig.cmake +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -if(DEFINED __INCLUDED_GR_SWIG_CMAKE) - return() -endif() -set(__INCLUDED_GR_SWIG_CMAKE TRUE) - -include(GrPython) - -######################################################################## -# Builds a swig documentation file to be generated into python docstrings -# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....) 
-# -# Set the following variable to specify extra dependent targets: -# - GR_SWIG_DOCS_SOURCE_DEPS -# - GR_SWIG_DOCS_TARGET_DEPS -######################################################################## -function(GR_SWIG_MAKE_DOCS output_file) - if(ENABLE_DOXYGEN) - - #setup the input files variable list, quote formated - set(input_files) - unset(INPUT_PATHS) - foreach(input_path ${ARGN}) - if(IS_DIRECTORY ${input_path}) #when input path is a directory - file(GLOB input_path_h_files ${input_path}/*.h) - else() #otherwise its just a file, no glob - set(input_path_h_files ${input_path}) - endif() - list(APPEND input_files ${input_path_h_files}) - set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"") - endforeach(input_path) - - #determine the output directory - get_filename_component(name ${output_file} NAME_WE) - get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH) - set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs) - make_directory(${OUTPUT_DIRECTORY}) - - #generate the Doxyfile used by doxygen - configure_file( - ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in - ${OUTPUT_DIRECTORY}/Doxyfile - @ONLY) - - #Create a dummy custom command that depends on other targets - include(GrMiscUtils) - GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS}) - - #call doxygen on the Doxyfile + input headers - add_custom_command( - OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml - DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps} - COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile - COMMENT "Generating doxygen xml for ${name} docs" - ) - - #call the swig_doc script on the xml files - add_custom_command( - OUTPUT ${output_file} - DEPENDS ${input_files} ${stamp-file} ${OUTPUT_DIRECTORY}/xml/index.xml - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py - ${OUTPUT_DIRECTORY}/xml - ${output_file} - COMMENT "Generating python docstrings for ${name}" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen - ) - - else(ENABLE_DOXYGEN) - file(WRITE ${output_file} "\n") #no doxygen -> empty file - endif(ENABLE_DOXYGEN) -endfunction(GR_SWIG_MAKE_DOCS) - -######################################################################## -# Build a swig target for the common gnuradio use case. Usage: -# GR_SWIG_MAKE(target ifile ifile ifile...) -# -# Set the following variables before calling: -# - GR_SWIG_FLAGS -# - GR_SWIG_INCLUDE_DIRS -# - GR_SWIG_LIBRARIES -# - GR_SWIG_SOURCE_DEPS -# - GR_SWIG_TARGET_DEPS -# - GR_SWIG_DOC_FILE -# - GR_SWIG_DOC_DIRS -######################################################################## -macro(GR_SWIG_MAKE name) - set(ifiles ${ARGN}) - - # Shimming this in here to take care of a SWIG bug with handling - # vector and vector (on 32-bit machines) and - # vector (on 64-bit machines). Use this to test - # the size of size_t, then set SIZE_T_32 if it's a 32-bit machine - # or not if it's 64-bit. The logic in gr_type.i handles the rest. 
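As a rough sketch of the calling convention documented above (the variable values are illustrative assumptions; the module's actual swig/CMakeLists.txt may differ), a typical out-of-tree swig/CMakeLists.txt sets the GR_SWIG_* variables before invoking the macro:

```cmake
# Hypothetical swig/CMakeLists.txt fragment; paths and the doc file name
# are placeholders.
set(GR_SWIG_INCLUDE_DIRS ${CMAKE_SOURCE_DIR}/include ${GNURADIO_RUNTIME_INCLUDE_DIRS})
set(GR_SWIG_LIBRARIES gnuradio-nordic)
set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/nordic_swig_doc.i)
set(GR_SWIG_DOC_DIRS ${CMAKE_SOURCE_DIR}/include)

GR_SWIG_MAKE(nordic_swig nordic_swig.i)
GR_SWIG_INSTALL(TARGETS nordic_swig DESTINATION ${GR_PYTHON_DIR}/nordic)
```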
- INCLUDE(CheckTypeSize) - CHECK_TYPE_SIZE("size_t" SIZEOF_SIZE_T) - CHECK_TYPE_SIZE("unsigned int" SIZEOF_UINT) - if(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT}) - list(APPEND GR_SWIG_FLAGS -DSIZE_T_32) - endif(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT}) - - #do swig doc generation if specified - if(GR_SWIG_DOC_FILE) - set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS}) - list(APPEND GR_SWIG_DOCS_TARGET_DEPS ${GR_SWIG_TARGET_DEPS}) - GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS}) - add_custom_target(${name}_swig_doc DEPENDS ${GR_SWIG_DOC_FILE}) - list(APPEND GR_SWIG_TARGET_DEPS ${name}_swig_doc ${GR_RUNTIME_SWIG_DOC_FILE}) - endif() - - #append additional include directories - find_package(PythonLibs 2) - list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs) - list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS}) - - #prepend local swig directories - list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_SOURCE_DIR}) - list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_BINARY_DIR}) - - #determine include dependencies for swig file - execute_process( - COMMAND ${PYTHON_EXECUTABLE} - ${CMAKE_BINARY_DIR}/get_swig_deps.py - "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}" - OUTPUT_STRIP_TRAILING_WHITESPACE - OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - ) - - #Create a dummy custom command that depends on other targets - include(GrMiscUtils) - GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS}) - set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag) - add_custom_command( - OUTPUT ${tag_file} - DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps} - COMMAND ${CMAKE_COMMAND} -E touch ${tag_file} - ) - - #append the specified include directories - include_directories(${GR_SWIG_INCLUDE_DIRS}) - list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file}) - - #setup the swig flags with flags and include directories - set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS}) - foreach(dir ${GR_SWIG_INCLUDE_DIRS}) - list(APPEND CMAKE_SWIG_FLAGS "-I${dir}") - endforeach(dir) - - #set the C++ property on the swig .i file so it builds - set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON) - - #setup the actual swig library target to be built - include(UseSWIG) - SWIG_ADD_MODULE(${name} python ${ifiles}) - SWIG_LINK_LIBRARIES(${name} ${PYTHON_LIBRARIES} ${GR_SWIG_LIBRARIES}) - if(${name} STREQUAL "runtime_swig") - SET_TARGET_PROPERTIES(${SWIG_MODULE_runtime_swig_REAL_NAME} PROPERTIES DEFINE_SYMBOL "gnuradio_runtime_EXPORTS") - endif(${name} STREQUAL "runtime_swig") - -endmacro(GR_SWIG_MAKE) - -######################################################################## -# Install swig targets generated by GR_SWIG_MAKE. Usage: -# GR_SWIG_INSTALL( -# TARGETS target target target... 
-# [DESTINATION destination] -# [COMPONENT component] -# ) -######################################################################## -macro(GR_SWIG_INSTALL) - - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN}) - - foreach(name ${GR_SWIG_INSTALL_TARGETS}) - install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME} - DESTINATION ${GR_SWIG_INSTALL_DESTINATION} - COMPONENT ${GR_SWIG_INSTALL_COMPONENT} - ) - - include(GrPython) - GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py - DESTINATION ${GR_SWIG_INSTALL_DESTINATION} - COMPONENT ${GR_SWIG_INSTALL_COMPONENT} - ) - - GR_LIBTOOL( - TARGET ${SWIG_MODULE_${name}_REAL_NAME} - DESTINATION ${GR_SWIG_INSTALL_DESTINATION} - ) - - endforeach(name) - -endmacro(GR_SWIG_INSTALL) - -######################################################################## -# Generate a python file that can determine swig dependencies. -# Used by the make macro above to determine extra dependencies. -# When you build C++, CMake figures out the header dependencies. -# This code essentially performs that logic for swig includes. -######################################################################## -file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py " - -import os, sys, re - -i_include_matcher = re.compile('%(include|import)\\s*[<|\"](.*)[>|\"]') -h_include_matcher = re.compile('#(include)\\s*[<|\"](.*)[>|\"]') -include_dirs = sys.argv[2].split(';') - -def get_swig_incs(file_path): - if file_path.endswith('.i'): matcher = i_include_matcher - else: matcher = h_include_matcher - file_contents = open(file_path, 'r').read() - return matcher.findall(file_contents, re.MULTILINE) - -def get_swig_deps(file_path, level): - deps = [file_path] - if level == 0: return deps - for keyword, inc_file in get_swig_incs(file_path): - for inc_dir in include_dirs: - inc_path = os.path.join(inc_dir, inc_file) - if not os.path.exists(inc_path): continue - deps.extend(get_swig_deps(inc_path, level-1)) - break #found, we dont search in lower prio inc dirs - return deps - -if __name__ == '__main__': - ifiles = sys.argv[1].split(';') - deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], []) - #sys.stderr.write(';'.join(set(deps)) + '\\n\\n') - print(';'.join(set(deps))) -") diff --git a/cmake/Modules/GrTest.cmake b/cmake/Modules/GrTest.cmake deleted file mode 100644 index 8660715..0000000 --- a/cmake/Modules/GrTest.cmake +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -if(DEFINED __INCLUDED_GR_TEST_CMAKE) - return() -endif() -set(__INCLUDED_GR_TEST_CMAKE TRUE) - -######################################################################## -# Add a unit test and setup the environment for a unit test. -# Takes the same arguments as the ADD_TEST function. 
-# -# Before calling set the following variables: -# GR_TEST_TARGET_DEPS - built targets for the library path -# GR_TEST_LIBRARY_DIRS - directories for the library path -# GR_TEST_PYTHON_DIRS - directories for the python path -# GR_TEST_ENVIRONS - other environment key/value pairs -######################################################################## -function(GR_ADD_TEST test_name) - - #Ensure that the build exe also appears in the PATH. - list(APPEND GR_TEST_TARGET_DEPS ${ARGN}) - - #In the land of windows, all libraries must be in the PATH. - #Since the dependent libraries are not yet installed, - #we must manually set them in the PATH to run tests. - #The following appends the path of a target dependency. - foreach(target ${GR_TEST_TARGET_DEPS}) - get_target_property(location ${target} LOCATION) - if(location) - get_filename_component(path ${location} PATH) - string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path}) - list(APPEND GR_TEST_LIBRARY_DIRS ${path}) - endif(location) - endforeach(target) - - if(WIN32) - #SWIG generates the python library files into a subdirectory. - #Therefore, we must append this subdirectory into PYTHONPATH. - #Only do this for the python directories matching the following: - foreach(pydir ${GR_TEST_PYTHON_DIRS}) - get_filename_component(name ${pydir} NAME) - if(name MATCHES "^(swig|lib|src)$") - list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE}) - endif() - endforeach(pydir) - endif(WIN32) - - file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir) - file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list? - file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list? - - set(environs "VOLK_GENERIC=1" "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}") - list(APPEND environs ${GR_TEST_ENVIRONS}) - - #http://www.cmake.org/pipermail/cmake/2009-May/029464.html - #Replaced this add test + set environs code with the shell script generation. - #Its nicer to be able to manually run the shell script to diagnose problems. 
- #ADD_TEST(${ARGV}) - #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}") - - if(UNIX) - set(LD_PATH_VAR "LD_LIBRARY_PATH") - if(APPLE) - set(LD_PATH_VAR "DYLD_LIBRARY_PATH") - endif() - - set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH") - list(APPEND libpath "$${LD_PATH_VAR}") - list(APPEND pypath "$PYTHONPATH") - - #replace list separator with the path separator - string(REPLACE ";" ":" libpath "${libpath}") - string(REPLACE ";" ":" pypath "${pypath}") - list(APPEND environs "PATH=${binpath}" "${LD_PATH_VAR}=${libpath}" "PYTHONPATH=${pypath}") - - #generate a bat file that sets the environment and runs the test - if (CMAKE_CROSSCOMPILING) - set(SHELL "/bin/sh") - else(CMAKE_CROSSCOMPILING) - find_program(SHELL sh) - endif(CMAKE_CROSSCOMPILING) - set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh) - file(WRITE ${sh_file} "#!${SHELL}\n") - #each line sets an environment variable - foreach(environ ${environs}) - file(APPEND ${sh_file} "export ${environ}\n") - endforeach(environ) - #load the command to run with its arguments - foreach(arg ${ARGN}) - file(APPEND ${sh_file} "${arg} ") - endforeach(arg) - file(APPEND ${sh_file} "\n") - - #make the shell file executable - execute_process(COMMAND chmod +x ${sh_file}) - - add_test(${test_name} ${SHELL} ${sh_file}) - - endif(UNIX) - - if(WIN32) - list(APPEND libpath ${DLL_PATHS} "%PATH%") - list(APPEND pypath "%PYTHONPATH%") - - #replace list separator with the path separator (escaped) - string(REPLACE ";" "\\;" libpath "${libpath}") - string(REPLACE ";" "\\;" pypath "${pypath}") - list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}") - - #generate a bat file that sets the environment and runs the test - set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat) - file(WRITE ${bat_file} "@echo off\n") - #each line sets an environment variable - foreach(environ ${environs}) - file(APPEND ${bat_file} "SET ${environ}\n") - endforeach(environ) - #load the command to run with its arguments - foreach(arg ${ARGN}) - file(APPEND ${bat_file} "${arg} ") - endforeach(arg) - file(APPEND ${bat_file} "\n") - - add_test(${test_name} ${bat_file}) - endif(WIN32) - -endfunction(GR_ADD_TEST) diff --git a/cmake/Modules/UseSWIG.cmake b/cmake/Modules/UseSWIG.cmake deleted file mode 100644 index c0f1728..0000000 --- a/cmake/Modules/UseSWIG.cmake +++ /dev/null @@ -1,304 +0,0 @@ -# - SWIG module for CMake -# Defines the following macros: -# SWIG_ADD_MODULE(name language [ files ]) -# - Define swig module with given name and specified language -# SWIG_LINK_LIBRARIES(name [ libraries ]) -# - Link libraries to swig module -# All other macros are for internal use only. -# To get the actual name of the swig module, -# use: ${SWIG_MODULE_${name}_REAL_NAME}. -# Set Source files properties such as CPLUSPLUS and SWIG_FLAGS to specify -# special behavior of SWIG. Also global CMAKE_SWIG_FLAGS can be used to add -# special flags to all swig calls. -# Another special variable is CMAKE_SWIG_OUTDIR, it allows one to specify -# where to write all the swig generated module (swig -outdir option) -# The name-specific variable SWIG_MODULE__EXTRA_DEPS may be used -# to specify extra dependencies for the generated modules. -# If the source file generated by swig need some special flag you can use -# set_source_files_properties( ${swig_generated_file_fullname} -# PROPERTIES COMPILE_FLAGS "-bla") - - -#============================================================================= -# Copyright 2004-2009 Kitware, Inc. 
-# Copyright 2009 Mathieu Malaterre -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright.txt for details. -# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distribute this file outside of CMake, substitute the full -# License text for the above reference.) - -set(SWIG_CXX_EXTENSION "cxx") -set(SWIG_EXTRA_LIBRARIES "") - -set(SWIG_PYTHON_EXTRA_FILE_EXTENSION "py") - -# -# For given swig module initialize variables associated with it -# -macro(SWIG_MODULE_INITIALIZE name language) - string(TOUPPER "${language}" swig_uppercase_language) - string(TOLOWER "${language}" swig_lowercase_language) - set(SWIG_MODULE_${name}_LANGUAGE "${swig_uppercase_language}") - set(SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG "${swig_lowercase_language}") - - set(SWIG_MODULE_${name}_REAL_NAME "${name}") - if("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "UNKNOWN") - message(FATAL_ERROR "SWIG Error: Language \"${language}\" not found") - elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PYTHON") - # when swig is used without the -interface it will produce in the module.py - # a 'import _modulename' statement, which implies having a corresponding - # _modulename.so (*NIX), _modulename.pyd (Win32). - set(SWIG_MODULE_${name}_REAL_NAME "_${name}") - elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PERL") - set(SWIG_MODULE_${name}_EXTRA_FLAGS "-shadow") - endif() -endmacro() - -# -# For a given language, input file, and output file, determine extra files that -# will be generated. This is internal swig macro. 
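To make the interface documented at the top of UseSWIG.cmake concrete, here is a minimal, hypothetical invocation (the module name and .i file are placeholders; GR_SWIG_MAKE above wraps this same pattern):

```cmake
# Hedged sketch of direct use of the macros defined in this file.
set_source_files_properties(example.i PROPERTIES CPLUSPLUS ON)
SWIG_ADD_MODULE(example python example.i)
SWIG_LINK_LIBRARIES(example ${PYTHON_LIBRARIES})
# For python the real target is _example; see ${SWIG_MODULE_example_REAL_NAME}.
```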
-# - -macro(SWIG_GET_EXTRA_OUTPUT_FILES language outfiles generatedpath infile) - set(${outfiles} "") - get_source_file_property(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename - ${infile} SWIG_MODULE_NAME) - if(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename STREQUAL "NOTFOUND") - get_filename_component(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename "${infile}" NAME_WE) - endif() - foreach(it ${SWIG_${language}_EXTRA_FILE_EXTENSION}) - set(${outfiles} ${${outfiles}} - "${generatedpath}/${SWIG_GET_EXTRA_OUTPUT_FILES_module_basename}.${it}") - endforeach() -endmacro() - -# -# Take swig (*.i) file and add proper custom commands for it -# -macro(SWIG_ADD_SOURCE_TO_MODULE name outfiles infile) - set(swig_full_infile ${infile}) - get_filename_component(swig_source_file_path "${infile}" PATH) - get_filename_component(swig_source_file_name_we "${infile}" NAME_WE) - get_source_file_property(swig_source_file_generated ${infile} GENERATED) - get_source_file_property(swig_source_file_cplusplus ${infile} CPLUSPLUS) - get_source_file_property(swig_source_file_flags ${infile} SWIG_FLAGS) - if("${swig_source_file_flags}" STREQUAL "NOTFOUND") - set(swig_source_file_flags "") - endif() - set(swig_source_file_fullname "${infile}") - if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_SOURCE_DIR}") - string(REGEX REPLACE - "^${CMAKE_CURRENT_SOURCE_DIR}" "" - swig_source_file_relative_path - "${swig_source_file_path}") - else() - if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_BINARY_DIR}") - string(REGEX REPLACE - "^${CMAKE_CURRENT_BINARY_DIR}" "" - swig_source_file_relative_path - "${swig_source_file_path}") - set(swig_source_file_generated 1) - else() - set(swig_source_file_relative_path "${swig_source_file_path}") - if(swig_source_file_generated) - set(swig_source_file_fullname "${CMAKE_CURRENT_BINARY_DIR}/${infile}") - else() - set(swig_source_file_fullname "${CMAKE_CURRENT_SOURCE_DIR}/${infile}") - endif() - endif() - endif() - - set(swig_generated_file_fullname - "${CMAKE_CURRENT_BINARY_DIR}") - if(swig_source_file_relative_path) - set(swig_generated_file_fullname - "${swig_generated_file_fullname}/${swig_source_file_relative_path}") - endif() - # If CMAKE_SWIG_OUTDIR was specified then pass it to -outdir - if(CMAKE_SWIG_OUTDIR) - set(swig_outdir ${CMAKE_SWIG_OUTDIR}) - else() - set(swig_outdir ${CMAKE_CURRENT_BINARY_DIR}) - endif() - SWIG_GET_EXTRA_OUTPUT_FILES(${SWIG_MODULE_${name}_LANGUAGE} - swig_extra_generated_files - "${swig_outdir}" - "${infile}") - set(swig_generated_file_fullname - "${swig_generated_file_fullname}/${swig_source_file_name_we}") - # add the language into the name of the file (i.e. 
TCL_wrap) - # this allows for the same .i file to be wrapped into different languages - set(swig_generated_file_fullname - "${swig_generated_file_fullname}${SWIG_MODULE_${name}_LANGUAGE}_wrap") - - if(swig_source_file_cplusplus) - set(swig_generated_file_fullname - "${swig_generated_file_fullname}.${SWIG_CXX_EXTENSION}") - else() - set(swig_generated_file_fullname - "${swig_generated_file_fullname}.c") - endif() - - # Shut up some warnings from poor SWIG code generation that we - # can do nothing about, when this flag is available - include(CheckCXXCompilerFlag) - check_cxx_compiler_flag("-Wno-unused-but-set-variable" HAVE_WNO_UNUSED_BUT_SET_VARIABLE) - if(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) - set_source_files_properties(${swig_generated_file_fullname} - PROPERTIES COMPILE_FLAGS "-Wno-unused-but-set-variable") - endif(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) - - get_directory_property(cmake_include_directories INCLUDE_DIRECTORIES) - set(swig_include_dirs) - foreach(it ${cmake_include_directories}) - set(swig_include_dirs ${swig_include_dirs} "-I${it}") - endforeach() - - set(swig_special_flags) - # default is c, so add c++ flag if it is c++ - if(swig_source_file_cplusplus) - set(swig_special_flags ${swig_special_flags} "-c++") - endif() - set(swig_extra_flags) - if(SWIG_MODULE_${name}_EXTRA_FLAGS) - set(swig_extra_flags ${swig_extra_flags} ${SWIG_MODULE_${name}_EXTRA_FLAGS}) - endif() - - # hack to work around CMake bug in add_custom_command with multiple OUTPUT files - - file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) - execute_process( - COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib -unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] -print(re.sub('\\W', '_', '${name} ${reldir} ' + unique))" - OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE - ) - - file( - WRITE ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in - "int main(void){return 0;}\n" - ) - - # create dummy dependencies - add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp - COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in - ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp - DEPENDS "${swig_source_file_fullname}" ${SWIG_MODULE_${name}_EXTRA_DEPS} - COMMENT "" - ) - - # create the dummy target - add_executable(${_target} ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp) - - # add a custom command to the dummy target - add_custom_command( - TARGET ${_target} - # Let's create the ${swig_outdir} at execution time, in case dir contains $(OutDir) - COMMAND ${CMAKE_COMMAND} -E make_directory ${swig_outdir} - COMMAND "${SWIG_EXECUTABLE}" - ARGS "-${SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG}" - ${swig_source_file_flags} - ${CMAKE_SWIG_FLAGS} - -outdir ${swig_outdir} - ${swig_special_flags} - ${swig_extra_flags} - ${swig_include_dirs} - -o "${swig_generated_file_fullname}" - "${swig_source_file_fullname}" - COMMENT "Swig source" - ) - - #add dummy independent dependencies from the _target to each file - #that will be generated by the SWIG command above - - set(${outfiles} "${swig_generated_file_fullname}" ${swig_extra_generated_files}) - - foreach(swig_gen_file ${${outfiles}}) - add_custom_command( - OUTPUT ${swig_gen_file} - COMMAND "" - DEPENDS ${_target} - COMMENT "" - ) - endforeach() - - set_source_files_properties( - ${outfiles} PROPERTIES GENERATED 1 - ) - -endmacro() - -# -# Create Swig module -# -macro(SWIG_ADD_MODULE name language) - SWIG_MODULE_INITIALIZE(${name} ${language}) - set(swig_dot_i_sources) - set(swig_other_sources) - foreach(it ${ARGN}) - 
if(${it} MATCHES ".*\\.i$") - set(swig_dot_i_sources ${swig_dot_i_sources} "${it}") - else() - set(swig_other_sources ${swig_other_sources} "${it}") - endif() - endforeach() - - set(swig_generated_sources) - foreach(it ${swig_dot_i_sources}) - SWIG_ADD_SOURCE_TO_MODULE(${name} swig_generated_source ${it}) - set(swig_generated_sources ${swig_generated_sources} "${swig_generated_source}") - endforeach() - get_directory_property(swig_extra_clean_files ADDITIONAL_MAKE_CLEAN_FILES) - set_directory_properties(PROPERTIES - ADDITIONAL_MAKE_CLEAN_FILES "${swig_extra_clean_files};${swig_generated_sources}") - add_library(${SWIG_MODULE_${name}_REAL_NAME} - MODULE - ${swig_generated_sources} - ${swig_other_sources}) - string(TOLOWER "${language}" swig_lowercase_language) - if ("${swig_lowercase_language}" STREQUAL "java") - if (APPLE) - # In java you want: - # System.loadLibrary("LIBRARY"); - # then JNI will look for a library whose name is platform dependent, namely - # MacOS : libLIBRARY.jnilib - # Windows: LIBRARY.dll - # Linux : libLIBRARY.so - set_target_properties (${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".jnilib") - endif () - endif () - if ("${swig_lowercase_language}" STREQUAL "python") - # this is only needed for the python case where a _modulename.so is generated - set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES PREFIX "") - # Python extension modules on Windows must have the extension ".pyd" - # instead of ".dll" as of Python 2.5. Older python versions do support - # this suffix. - # http://docs.python.org/whatsnew/ports.html#SECTION0001510000000000000000 - # - # Windows: .dll is no longer supported as a filename extension for extension modules. - # .pyd is now the only filename extension that will be searched for. - # - if(WIN32 AND NOT CYGWIN) - set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".pyd") - endif() - endif () -endmacro() - -# -# Like TARGET_LINK_LIBRARIES but for swig modules -# -macro(SWIG_LINK_LIBRARIES name) - if(SWIG_MODULE_${name}_REAL_NAME) - target_link_libraries(${SWIG_MODULE_${name}_REAL_NAME} ${ARGN}) - else() - message(SEND_ERROR "Cannot find Swig library \"${name}\".") - endif() -endmacro() diff --git a/cmake/Modules/nordicConfig.cmake b/cmake/Modules/nordicConfig.cmake deleted file mode 100644 index 366ceeb..0000000 --- a/cmake/Modules/nordicConfig.cmake +++ /dev/null @@ -1,30 +0,0 @@ -INCLUDE(FindPkgConfig) -PKG_CHECK_MODULES(PC_NORDIC nordic) - -FIND_PATH( - NORDIC_INCLUDE_DIRS - NAMES nordic/api.h - HINTS $ENV{NORDIC_DIR}/include - ${PC_NORDIC_INCLUDEDIR} - PATHS ${CMAKE_INSTALL_PREFIX}/include - /usr/local/include - /usr/include -) - -FIND_LIBRARY( - NORDIC_LIBRARIES - NAMES gnuradio-nordic - HINTS $ENV{NORDIC_DIR}/lib - ${PC_NORDIC_LIBDIR} - PATHS ${CMAKE_INSTALL_PREFIX}/lib - ${CMAKE_INSTALL_PREFIX}/lib64 - /usr/local/lib - /usr/local/lib64 - /usr/lib - /usr/lib64 -) - -INCLUDE(FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS(NORDIC DEFAULT_MSG NORDIC_LIBRARIES NORDIC_INCLUDE_DIRS) -MARK_AS_ADVANCED(NORDIC_LIBRARIES NORDIC_INCLUDE_DIRS) - diff --git a/cmake/cmake_uninstall.cmake.in b/cmake/cmake_uninstall.cmake.in deleted file mode 100644 index 9ae1ae4..0000000 --- a/cmake/cmake_uninstall.cmake.in +++ /dev/null @@ -1,32 +0,0 @@ -# http://www.vtk.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F - -IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") - MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"") 
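[Editor's note] The SWIG_ADD_MODULE and SWIG_LINK_LIBRARIES macros removed above were normally driven from swig/CMakeLists.txt. A minimal sketch of that invocation follows; gr-nordic actually reaches these macros through the GR_SWIG_MAKE / GR_SWIG_INSTALL wrappers in GrSwig.cmake, so the direct calls, the include path, and the variable names shown here are illustrative assumptions rather than the module's exact build logic.

    find_package(SWIG REQUIRED)
    find_package(PythonLibs REQUIRED)
    include(${CMAKE_SOURCE_DIR}/cmake/Modules/UseSWIG.cmake)

    # Treat the interface file as C++ so a .cxx wrapper is generated,
    # and keep the generated .py wrapper in the current build directory.
    set_source_files_properties(nordic_swig.i PROPERTIES CPLUSPLUS ON)
    set(CMAKE_SWIG_OUTDIR ${CMAKE_CURRENT_BINARY_DIR})

    # Wrap the interface for Python and link the resulting _nordic_swig module
    # against the C++ library it exposes.
    SWIG_ADD_MODULE(nordic_swig python nordic_swig.i)
    SWIG_LINK_LIBRARIES(nordic_swig ${PYTHON_LIBRARIES} gnuradio-nordic)
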
-ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") - -FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) -STRING(REGEX REPLACE "\n" ";" files "${files}") -FOREACH(file ${files}) - MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") - IF(EXISTS "$ENV{DESTDIR}${file}") - EXEC_PROGRAM( - "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" - OUTPUT_VARIABLE rm_out - RETURN_VALUE rm_retval - ) - IF(NOT "${rm_retval}" STREQUAL 0) - MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") - ENDIF(NOT "${rm_retval}" STREQUAL 0) - ELSEIF(IS_SYMLINK "$ENV{DESTDIR}${file}") - EXEC_PROGRAM( - "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" - OUTPUT_VARIABLE rm_out - RETURN_VALUE rm_retval - ) - IF(NOT "${rm_retval}" STREQUAL 0) - MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") - ENDIF(NOT "${rm_retval}" STREQUAL 0) - ELSE(EXISTS "$ENV{DESTDIR}${file}") - MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") - ENDIF(EXISTS "$ENV{DESTDIR}${file}") -ENDFOREACH(file) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt deleted file mode 100644 index 32aedd1..0000000 --- a/docs/CMakeLists.txt +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -######################################################################## -# Setup dependencies -######################################################################## -find_package(Doxygen) - -######################################################################## -# Begin conditional configuration -######################################################################## -if(ENABLE_DOXYGEN) - -######################################################################## -# Add subdirectories -######################################################################## -add_subdirectory(doxygen) - -endif(ENABLE_DOXYGEN) diff --git a/docs/README.nordic b/docs/README.nordic deleted file mode 100644 index f678e88..0000000 --- a/docs/README.nordic +++ /dev/null @@ -1,11 +0,0 @@ -This is the nordic-write-a-block package meant as a guide to building -out-of-tree packages. To use the nordic blocks, the Python namespaces -is in 'nordic', which is imported as: - - import nordic - -See the Doxygen documentation for details about the blocks available -in this package. A quick listing of the details can be found in Python -after importing by using: - - help(nordic) diff --git a/docs/doxygen/CMakeLists.txt b/docs/doxygen/CMakeLists.txt deleted file mode 100644 index 107f05e..0000000 --- a/docs/doxygen/CMakeLists.txt +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
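[Editor's note] cmake_uninstall.cmake.in above is only a template; it relies on the top-level CMakeLists.txt to configure it and to register an uninstall target, following the CMake FAQ pattern the file cites. A sketch of that wiring, assuming the deleted top-level CMakeLists.txt followed the standard gr_modtool layout:

    configure_file(
        "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
        "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
        @ONLY)

    # "make uninstall" then replays install_manifest.txt through the
    # generated script above, removing each recorded file.
    add_custom_target(uninstall
        "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake")
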
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -######################################################################## -# Create the doxygen configuration file -######################################################################## -file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir) -file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir) -file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir) -file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir) - -set(HAVE_DOT ${DOXYGEN_DOT_FOUND}) -set(enable_html_docs YES) -set(enable_latex_docs NO) -set(enable_xml_docs YES) - -configure_file( - ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in - ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile -@ONLY) - -set(BUILT_DIRS ${CMAKE_CURRENT_BINARY_DIR}/xml ${CMAKE_CURRENT_BINARY_DIR}/html) - -######################################################################## -# Make and install doxygen docs -######################################################################## -add_custom_command( - OUTPUT ${BUILT_DIRS} - COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMENT "Generating documentation with doxygen" -) - -add_custom_target(doxygen_target ALL DEPENDS ${BUILT_DIRS}) - -install(DIRECTORY ${BUILT_DIRS} DESTINATION ${GR_PKG_DOC_DIR}) diff --git a/docs/doxygen/Doxyfile.in b/docs/doxygen/Doxyfile.in deleted file mode 100644 index 17a451d..0000000 --- a/docs/doxygen/Doxyfile.in +++ /dev/null @@ -1,1922 +0,0 @@ -# Doxyfile 1.8.4 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed -# in front of the TAG it is preceding . -# All text after a hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" "). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# http://www.gnu.org/software/libiconv for the list of possible encodings. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or sequence of words) that should -# identify the project. Note that if you do not use Doxywizard you need -# to put quotes around the project name if it contains spaces. - -PROJECT_NAME = "GNU Radio's NORDIC Package" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. 
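[Editor's note] docs/CMakeLists.txt and docs/doxygen/CMakeLists.txt above are both gated on ENABLE_DOXYGEN, which was defined in the deleted top-level CMakeLists.txt and is not shown in this patch. A minimal sketch of one common way to expose that flag; whether gr-nordic used a plain option() or GNU Radio's component registration is an assumption.

    find_package(Doxygen)
    # Default the documentation build on only when doxygen is actually available.
    option(ENABLE_DOXYGEN "Build the Doxygen documentation" "${DOXYGEN_FOUND}")

Configuring with -DENABLE_DOXYGEN=ON and building doxygen_target then produces the html/ and xml/ trees that the rules above install to ${GR_PKG_DOC_DIR}.
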
- -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer -# a quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify an logo or icon that is -# included in the documentation. The maximum height of the logo should not -# exceed 55 pixels and the maximum width should not exceed 200 pixels. -# Doxygen will copy the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create -# 4096 sub-directories (in 2 levels) under the output directory of each output -# format and will distribute the generated files over these directories. -# Enabling this option can be useful when feeding doxygen a huge amount of -# source files, where putting all generated files in the same directory would -# otherwise cause performance problems for the file system. - -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, -# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English -# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, -# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, -# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. 
- -ALWAYS_DETAILED_SEC = YES - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = NO - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. Note that you specify absolute paths here, but also -# relative paths, which will be relative from the directory where doxygen is -# started. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. -# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful if your file system -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. - -MULTILINE_CPP_IS_BRIEF = YES - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. 
- -TAB_SIZE = 8 - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding -# "class=itcl::class" will allow you to use the command class in the -# itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for -# VHDL. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, -# and language is one of the parsers supported by doxygen: IDL, Java, -# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, -# C++. For instance to make doxygen treat .inc files as Fortran files (default -# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note -# that for custom extensions you also need to set FILE_PATTERNS otherwise the -# files are not read by doxygen. - -EXTENSION_MAPPING = - -# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all -# comments according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you -# can mix doxygen, HTML, and XML commands with Markdown formatting. -# Disable only in case of backward compatibilities issues. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. 
-# func(std::string) {}). This also makes the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. - -BUILTIN_STL_SUPPORT = YES - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES (the -# default) will make doxygen replace the get and set methods by a property in -# the documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and -# unions are shown inside the group in which they are included (e.g. using -# @ingroup) instead of on a separate page (for HTML and Man pages) or -# section (for LaTeX and RTF). - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and -# unions with only public data fields or simple typedef fields will be shown -# inline in the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO (the default), structs, classes, and unions are shown on a separate -# page (for HTML and Man pages) or section (for LaTeX and RTF). - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically -# be useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can -# be an expensive process and often the same symbol appear multiple times in -# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too -# small doxygen will become slower. If the cache is too large, memory is wasted. 
-# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid -# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 -# symbols. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal -# scope will be included in the documentation. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. -# If set to NO (the default) only methods in the interface are included. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespaces are hidden. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. 
- -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen -# will list include files with double quotes in the documentation -# rather than with sharp brackets. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in -# declaration order. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen -# will sort the (brief and detailed) documentation of class members so that -# constructors and destructors are listed first. If set to NO (the default) -# the constructors will appear in the respective orders defined by -# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. -# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO -# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to -# do proper type resolution of all parameters of a function it will reject a -# match between the prototype and the implementation of a member function even -# if there is only one candidate or it is obvious which candidate to choose -# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen -# will still accept a match between prototype and implementation in such cases. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. 
This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = NO - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = NO - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = NO - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. - -GENERATE_DEPRECATEDLIST= NO - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if section-label ... \endif -# and \cond section-label ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or macro consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and macros in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. -# This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. - -SHOW_NAMESPACES = NO - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command , where is the value of -# the FILE_VERSION_FILTER tag, and is the name of an input file -# provided by doxygen. Whatever the program writes to standard output -# is used as the file version. See the manual for examples. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. -# You can optionally specify a file name after the option, if omitted -# DoxygenLayout.xml will be used as the name of the layout file. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files -# containing the references data. This must be a list of .bib files. The -# .bib extension is automatically appended if omitted. Using this command -# requires the bibtex tool to be installed. See also -# http://en.wikipedia.org/wiki/BibTeX for more info. 
For LaTeX the style -# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this -# feature you need bibtex and perl available in the search path. Do not use -# file names with spaces, bibtex cannot handle them. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = YES - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. - -WARN_IF_DOC_ERROR = YES - -# The WARN_NO_PARAMDOC option can be enabled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) - -WARN_FORMAT = "$file:$line: $text " - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = @top_srcdir@ \ - @top_builddir@ - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. 
If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh -# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py -# *.f90 *.f *.for *.vhd *.vhdl - -FILE_PATTERNS = *.h \ - *.dox - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = @abs_top_builddir@/docs/doxygen/html \ - @abs_top_builddir@/docs/doxygen/xml \ - @abs_top_builddir@/docs/doxygen/other/doxypy.py \ - @abs_top_builddir@/_CPack_Packages \ - @abs_top_srcdir@/cmake - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS = */.deps/* \ - */.libs/* \ - */.svn/* \ - */CVS/* \ - */__init__.py \ - */qa_*.cc \ - */qa_*.h \ - */qa_*.py - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS = ad9862 \ - numpy \ - *swig* \ - *Swig* \ - *my_top_block* \ - *my_graph* \ - *app_top_block* \ - *am_rx_graph* \ - *_queue_watcher_thread* \ - *parse* \ - *MyFrame* \ - *MyApp* \ - *PyObject* \ - *wfm_rx_block* \ - *_sptr* \ - *debug* \ - *wfm_rx_sca_block* \ - *tv_rx_block* \ - *wxapt_rx_block* \ - *example_signal* - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. 
Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. -# If FILTER_PATTERNS is specified, this tag will be ignored. -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. -# Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. -# The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty or if -# non of the patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = *.py=@top_srcdir@/doc/doxygen/other/doxypy.py - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) -# and it is also possible to disable source filtering for a specific pattern -# using *.ext= (so without naming a filter). This option only has effect when -# FILTER_SOURCE_FILES is enabled. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. -# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C, C++ and Fortran comments will always remain visible. - -STRIP_CODE_COMMENTS = NO - -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = YES - -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = YES - -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. 
-# Otherwise they will link to the documentation. - -REFERENCES_LINK_SOURCE = YES - -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = YES - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = @enable_html_docs@ - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. Note that when using a custom header you are responsible -# for the proper inclusion of any scripts and style sheets that doxygen -# needs, which is dependent on the configuration options used. -# It is advised to generate a default header using "doxygen -w html -# header.html footer.html stylesheet.css YourConfigFile" and then modify -# that header. Note that the header is subject to change so you typically -# have to redo this when upgrading to a newer version of doxygen or when -# changing the value of configuration settings such as GENERATE_TREEVIEW! - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. 
It can be used to -# fine-tune the look of the HTML output. If left blank doxygen will -# generate a default style sheet. Note that it is recommended to use -# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this -# tag will in the future become obsolete. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional -# user-defined cascading style sheet that is included after the standard -# style sheets created by doxygen. Using this option one can overrule -# certain style aspects. This is preferred over using HTML_STYLESHEET -# since it does not replace the standard style sheet and is therefor more -# robust against future updates. Doxygen will copy the style sheet file to -# the output directory. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that -# the files will be copied as-is; there are no commands or markers available. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. -# Doxygen will adjust the colors in the style sheet and background images -# according to this color. Hue is specified as an angle on a colorwheel, -# see http://en.wikipedia.org/wiki/Hue for more information. -# For instance the value 0 represents red, 60 is yellow, 120 is green, -# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. -# The allowed range is 0 to 359. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of -# the colors in the HTML output. For a value of 0 the output will use -# grayscales only. A value of 255 will produce the most vivid colors. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to -# the luminance component of the colors in the HTML output. Values below -# 100 gradually make the output lighter, whereas values above 100 make -# the output darker. The value divided by 100 is the actual gamma applied, -# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, -# and 100 does not change the gamma. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting -# this to NO can help when comparing the output of multiple runs. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of -# entries shown in the various tree structured indices initially; the user -# can expand and collapse entries dynamically later on. Doxygen will expand -# the tree to such a level that at most the specified number of entries are -# visible (unless a fully collapsed tree already exceeds this amount). -# So setting the number of entries 1 will produce a full collapsed tree by -# default. 0 is a special value representing an infinite number of entries -# and will result in a full expanded tree by default. 
- -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OSX 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. -# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. - -GENERATE_DOCSET = NO - -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely -# identify the documentation publisher. This should be a reverse domain-name -# style string, e.g. com.mycompany.MyDocSet.documentation. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be -# written to the html output directory. - -CHM_FILE = - -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. - -HHC_LOCATION = - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. - -CHM_INDEX_ENCODING = - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. - -TOC_EXPAND = YES - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated -# that can be used as input for Qt's qhelpgenerator to generate a -# Qt Compressed Help (.qch) of the generated HTML documentation. 
- -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can -# be used to specify the file name of the resulting .qch file. -# The path specified is relative to the HTML output folder. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#namespace - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#virtual-folders - -QHP_VIRTUAL_FOLDER = doc - -# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to -# add. For more information please see -# http://doc.trolltech.com/qthelpproject.html#custom-filters - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see -# -# Qt Help Project / Custom Filters. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's -# filter section matches. -# -# Qt Help Project / Filter Attributes. - -QHP_SECT_FILTER_ATTRS = - -# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can -# be used to specify the location of Qt's qhelpgenerator. -# If non-empty doxygen will try to run qhelpgenerator on the generated -# .qhp file. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files -# will be generated, which together with the HTML files, form an Eclipse help -# plugin. To install this plugin and make it available under the help contents -# menu in Eclipse, the contents of the directory containing the HTML and XML -# files needs to be copied into the plugins directory of eclipse. The name of -# the directory within the plugins directory should be the same as -# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before -# the help appears. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have -# this name. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) -# at top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. Since the tabs have the same information as the -# navigation tree you can set this option to NO if you already set -# GENERATE_TREEVIEW to YES. - -DISABLE_INDEX = YES - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to YES, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). -# Windows users are probably better off using the HTML help feature. -# Since the tree basically has the same information as the tab index you -# could consider to set DISABLE_INDEX to NO when enabling this option. - -GENERATE_TREEVIEW = YES - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values -# (range [0,1..20]) that doxygen will group on one line in the generated HTML -# documentation. 
Note that a value of 0 will completely suppress the enum -# values from appearing in the overview section. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 180 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open -# links to external symbols imported via tag files in a separate window. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are -# not supported properly for IE 6.0, but are supported on all modern browsers. -# Note that when changing this option you need to delete any form_*.png files -# in the HTML output before the changes have effect. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax -# (see http://www.mathjax.org) which uses client side Javascript for the -# rendering instead of using prerendered bitmaps. Use this if you do not -# have LaTeX installed or if you want to formulas look prettier in the HTML -# output. When enabled you may also need to install MathJax separately and -# configure the path to it using the MATHJAX_RELPATH option. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and -# SVG. The default value is HTML-CSS, which is slower, but has the best -# compatibility. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the -# HTML output directory using the MATHJAX_RELPATH option. The destination -# directory should contain the MathJax.js script. For instance, if the mathjax -# directory is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to -# the MathJax Content Delivery Network so you can quickly see the result without -# installing MathJax. -# However, it is strongly recommended to install a local -# copy of MathJax from http://www.mathjax.org before deployment. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension -# names that should be enabled during MathJax rendering. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript -# pieces of code that will be used on startup of the MathJax code. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box -# for the HTML output. The underlying search engine uses javascript -# and DHTML and should work on any modern browser. Note that when using -# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets -# (GENERATE_DOCSET) there is already a search function so this one should -# typically be disabled. For large projects the javascript based search engine -# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. 
- -SEARCHENGINE = NO - -# When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. -# There are two flavours of web server based search depending on the -# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for -# searching and an index file used by the script. When EXTERNAL_SEARCH is -# enabled the indexing and searching needs to be provided by external tools. -# See the manual for details. - -SERVER_BASED_SEARCH = NO - -# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP -# script for searching. Instead the search results are written to an XML file -# which needs to be processed by an external indexer. Doxygen will invoke an -# external search engine pointed to by the SEARCHENGINE_URL option to obtain -# the search results. Doxygen ships with an example indexer (doxyindexer) and -# search engine (doxysearch.cgi) which are based on the open source search -# engine library Xapian. See the manual for configuration details. - -EXTERNAL_SEARCH = NO - -# The SEARCHENGINE_URL should point to a search engine hosted by a web server -# which will returned the search results when EXTERNAL_SEARCH is enabled. -# Doxygen ships with an example search engine (doxysearch) which is based on -# the open source search engine library Xapian. See the manual for configuration -# details. - -SEARCHENGINE_URL = - -# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed -# search data is written to a file for indexing by an external tool. With the -# SEARCHDATA_FILE tag the name of this file can be specified. - -SEARCHDATA_FILE = searchdata.xml - -# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the -# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is -# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple -# projects and redirect the results back to the right project. - -EXTERNAL_SEARCH_ID = - -# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen -# projects other than the one defined by this configuration file, but that are -# all added to the same external search index. Each project needs to have a -# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id -# of to a relative location where the documentation can be found. -# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... - -EXTRA_SEARCH_MAPPINGS = - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = @enable_latex_docs@ - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. If left blank `latex' will be used as the default command name. -# Note that when enabling USE_PDFLATEX this option is only used for -# generating bitmaps for formulas in the HTML output, but not in the -# Makefile that is written to the output directory. 
- -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. If left blank `makeindex' will be used as the -# default command name. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, letter, legal and -# executive. If left blank a4 will be used. - -PAPER_TYPE = letter - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for -# the generated latex document. The footer should contain everything after -# the last chapter. If it is left blank doxygen will generate a -# standard footer. Notice: only use this tag if you know what you are doing! - -LATEX_FOOTER = - -# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images -# or other source files which should be copied to the LaTeX output directory. -# Note that the files will be copied as-is; there are no commands or markers -# available. - -LATEX_EXTRA_FILES = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = NO - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) -# in the output. - -LATEX_HIDE_INDICES = NO - -# If LATEX_SOURCE_CODE is set to YES then doxygen will include -# source code with syntax highlighting in the LaTeX output. -# Note that which sources are shown also depends on other settings -# such as SOURCE_BROWSER. - -LATEX_SOURCE_CODE = NO - -# The LATEX_BIB_STYLE tag can be used to specify the style to use for the -# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See -# http://en.wikipedia.org/wiki/BibTeX for more info. 
- -LATEX_BIB_STYLE = plain - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load style sheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. - -GENERATE_XML = @enable_xml_docs@ - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `xml' will be used as the default path. - -XML_OUTPUT = xml - -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the -# syntax of the XML files. 
- -XML_SCHEMA = - -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_DTD = - -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that -# enabling this will significantly increase the size of the XML output. - -XML_PROGRAMLISTING = NO - -#--------------------------------------------------------------------------- -# configuration options related to the DOCBOOK output -#--------------------------------------------------------------------------- - -# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files -# that can be used to generate PDF. - -GENERATE_DOCBOOK = NO - -# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in -# front of it. If left blank docbook will be used as the default path. - -DOCBOOK_OUTPUT = docbook - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able -# to generate PDF and DVI output from the Perl module output. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. -# This is useful -# if you want to understand what is going on. -# On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller -# and Perl will parse it just the same. - -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. -# This is useful so different doxyrules.make files included by the same -# Makefile don't overwrite each other's variables. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. 
If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = NO - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_DEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# pointed to by INCLUDE_PATH will be searched when a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. - -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition that -# overrules the definition found in the source code. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all references to function-like macros -# that are alone on a line, have an all uppercase name, and do not end with a -# semicolon, because these will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES option can be used to specify one or more tagfiles. For each -# tag file the location of the external documentation should be added. The -# format of a tag file without this location is as follows: -# -# TAGFILES = file1 file2 ... -# Adding location for the tag files is done as follows: -# -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths -# or URLs. Note that each tag file must have a unique name (where the name does -# NOT include the path). If a tag file is not located in the directory in which -# doxygen is run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. 
- -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed -# in the related pages index. If set to NO, only the current project's -# pages will be listed. - -EXTERNAL_PAGES = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base -# or super classes. Setting the tag to NO turns the diagrams off. Note that -# this option also works with HAVE_DOT disabled, but it is recommended to -# install and use dot, since it yields more powerful graphs. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = @HAVE_DOT@ - -# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is -# allowed to run in parallel. When set to 0 (the default) doxygen will -# base this on the number of processors available in the system. You can set it -# explicitly to a value larger than 0 to get control over the balance -# between CPU load and processing speed. - -DOT_NUM_THREADS = 0 - -# By default doxygen will use the Helvetica font for all dot files that -# doxygen generates. When you want a differently looking font you can specify -# the font name using DOT_FONTNAME. You need to make sure dot is able to find -# the font, which can be done by putting it in a standard location or by setting -# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the -# directory containing the font. - -DOT_FONTNAME = Helvetica - -# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. -# The default size is 10pt. - -DOT_FONTSIZE = 10 - -# By default doxygen will tell dot to use the Helvetica font. -# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to -# set the path where dot can find it. - -DOT_FONTPATH = - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# CLASS_DIAGRAMS tag to NO. 
- -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = NO - -# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for groups, showing the direct groups dependencies - -GROUP_GRAPHS = YES - -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. - -UML_LOOK = NO - -# If the UML_LOOK tag is enabled, the fields and methods are shown inside -# the class node. If there are many fields or methods and many nodes the -# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS -# threshold limits the number of items for each type to make the size more -# manageable. Set this to 0 for no limit. Note that the threshold may be -# exceeded by 50% before the limit is enforced. - -UML_LIMIT_NUM_FIELDS = 10 - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the CALL_GRAPH and HAVE_DOT options are set to YES then -# doxygen will generate a call dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable call graphs -# for selected functions only using the \callgraph command. - -CALL_GRAPH = NO - -# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then -# doxygen will generate a caller dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable caller -# graphs for selected functions only using the \callergraph command. - -CALLER_GRAPH = NO - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will generate a graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES -# then doxygen will show the dependencies a directory has on other directories -# in a graphical way. The dependency relations are determined by the #include -# relations between the files in the directories. - -DIRECTORY_GRAPH = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are svg, png, jpg, or gif. -# If left blank png will be used. If you choose svg you need to set -# HTML_FILE_EXTENSION to xhtml in order to make the SVG files -# visible in IE 9+ (other browsers do not have this requirement). 
- -DOT_IMAGE_FORMAT = png - -# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to -# enable generation of interactive SVG images that allow zooming and panning. -# Note that this requires a modern browser other than Internet Explorer. -# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you -# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files -# visible. Older versions of IE do not have SVG support. - -INTERACTIVE_SVG = NO - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MSCFILE_DIRS tag can be used to specify one or more directories that -# contain msc files that are included in the documentation (see the -# \mscfile command). - -MSCFILE_DIRS = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of -# nodes that will be shown in the graph. If the number of nodes in a graph -# becomes larger than this value, doxygen will truncate the graph, which is -# visualized by representing a node as a red box. Note that doxygen if the -# number of direct children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note -# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the -# graphs generated by dot. A depth value of 3 means that only nodes reachable -# from the root by following a path via at most 3 edges will be shown. Nodes -# that lay further from the root node will be omitted. Note that setting this -# option to 1 or 2 may greatly reduce the computation time needed for large -# code bases. Also note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. - -MAX_DOT_GRAPH_DEPTH = 0 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not -# seem to support this out of the box. Warning: Depending on the platform used, -# enabling this option may lead to badly anti-aliased labels on the edges of -# a graph (i.e. they become hard to read). - -DOT_TRANSPARENT = NO - -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). This -# makes dot run faster, but since only newer versions of dot (>1.8.10) -# support this, this feature is disabled by default. - -DOT_MULTI_TARGETS = YES - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate -# the various graphs. 
- -DOT_CLEANUP = YES diff --git a/docs/doxygen/Doxyfile.swig_doc.in b/docs/doxygen/Doxyfile.swig_doc.in deleted file mode 100644 index 57736d7..0000000 --- a/docs/doxygen/Doxyfile.swig_doc.in +++ /dev/null @@ -1,1890 +0,0 @@ -# Doxyfile 1.8.4 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed -# in front of the TAG it is preceding . -# All text after a hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" "). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# http://www.gnu.org/software/libiconv for the list of possible encodings. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or sequence of words) that should -# identify the project. Note that if you do not use Doxywizard you need -# to put quotes around the project name if it contains spaces. - -PROJECT_NAME = @CPACK_PACKAGE_NAME@ - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@ - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer -# a quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify an logo or icon that is -# included in the documentation. The maximum height of the logo should not -# exceed 55 pixels and the maximum width should not exceed 200 pixels. -# Doxygen will copy the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@ - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create -# 4096 sub-directories (in 2 levels) under the output directory of each output -# format and will distribute the generated files over these directories. -# Enabling this option can be useful when feeding doxygen a huge amount of -# source files, where putting all generated files in the same directory would -# otherwise cause performance problems for the file system. - -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. 
-# The default language is English, other supported languages are: -# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, -# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English -# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, -# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, -# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. Note that you specify absolute paths here, but also -# relative paths, which will be relative from the directory where doxygen is -# started. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. -# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. 
- -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful if your file system -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 8 - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding -# "class=itcl::class" will allow you to use the command class in the -# itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. 
Doxygen will then generate output that is tailored for -# VHDL. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, -# and language is one of the parsers supported by doxygen: IDL, Java, -# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, -# C++. For instance to make doxygen treat .inc files as Fortran files (default -# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note -# that for custom extensions you also need to set FILE_PATTERNS otherwise the -# files are not read by doxygen. - -EXTENSION_MAPPING = - -# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all -# comments according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you -# can mix doxygen, HTML, and XML commands with Markdown formatting. -# Disable only in case of backward compatibilities issues. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. -# func(std::string) {}). This also makes the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. - -BUILTIN_STL_SUPPORT = YES - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES (the -# default) will make doxygen replace the get and set methods by a property in -# the documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). 
Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and -# unions are shown inside the group in which they are included (e.g. using -# @ingroup) instead of on a separate page (for HTML and Man pages) or -# section (for LaTeX and RTF). - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and -# unions with only public data fields or simple typedef fields will be shown -# inline in the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO (the default), structs, classes, and unions are shown on a separate -# page (for HTML and Man pages) or section (for LaTeX and RTF). - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically -# be useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can -# be an expensive process and often the same symbol appear multiple times in -# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too -# small doxygen will become slower. If the cache is too large, memory is wasted. -# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid -# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 -# symbols. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal -# scope will be included in the documentation. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. 
-# If set to NO (the default) only methods in the interface are included. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespaces are hidden. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen -# will list include files with double quotes in the documentation -# rather than with sharp brackets. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. 
If set to NO (the default) the members will appear in -# declaration order. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen -# will sort the (brief and detailed) documentation of class members so that -# constructors and destructors are listed first. If set to NO (the default) -# the constructors will appear in the respective orders defined by -# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. -# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO -# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to -# do proper type resolution of all parameters of a function it will reject a -# match between the prototype and the implementation of a member function even -# if there is only one candidate or it is obvious which candidate to choose -# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen -# will still accept a match between prototype and implementation in such cases. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if section-label ... \endif -# and \cond section-label ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or macro consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and macros in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. 
- -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. -# This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command <command> <input-file>, where <command> is the value of -# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file -# provided by doxygen. Whatever the program writes to standard output -# is used as the file version. See the manual for examples. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. -# You can optionally specify a file name after the option, if omitted -# DoxygenLayout.xml will be used as the name of the layout file. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files -# containing the references data. This must be a list of .bib files. The -# .bib extension is automatically appended if omitted. Using this command -# requires the bibtex tool to be installed. See also -# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style -# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this -# feature you need bibtex and perl available in the search path. Do not use -# file names with spaces, bibtex cannot handle them. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = YES - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. - -WARN_IF_DOC_ERROR = YES - -# The WARN_NO_PARAMDOC option can be enabled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation.
- -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = @INPUT_PATHS@ - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh -# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py -# *.f90 *.f *.for *.vhd *.vhdl - -FILE_PATTERNS = *.h - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. 
Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain images that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command <filter> <input-file>, where -# <filter> is the value of the INPUT_FILTER tag, and <input-file> is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. -# If FILTER_PATTERNS is specified, this tag will be ignored. -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. -# Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. -# The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty or if -# none of the patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) -# and it is also possible to disable source filtering for a specific pattern -# using *.ext= (so without naming a filter). This option only has effect when -# FILTER_SOURCE_FILES is enabled. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on, for instance, GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources.
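As a side note on the INPUT_FILTER and FILTER_PATTERNS options above: the filter is executed as <filter> <input-file> and must echo a transformed copy of the file to standard output without adding or removing lines, because doxygen places its anchors on the filtered text. A minimal, line-preserving sketch (the script name strip_export_macro.py and the NORDIC_API macro are hypothetical examples, not taken from this tree) might be:

#!/usr/bin/env python
# strip_export_macro.py -- hypothetical doxygen INPUT_FILTER (illustration only).
# Echoes the named file to stdout with an export macro removed; the number of
# lines is left unchanged, as the INPUT_FILTER contract requires.
import sys

def main():
    with open(sys.argv[1]) as fh:
        for line in fh:
            # Dropping the macro does not add or remove any lines.
            sys.stdout.write(line.replace("NORDIC_API", ""))

if __name__ == "__main__":
    main()

It could then be applied selectively with something like FILTER_PATTERNS = *.h=./strip_export_macro.py (again, a hypothetical value).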
-# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C, C++ and Fortran comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. -# Otherwise they will link to the documentation. - -REFERENCES_LINK_SOURCE = YES - -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = NO - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = NO - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). 
If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. Note that when using a custom header you are responsible -# for the proper inclusion of any scripts and style sheets that doxygen -# needs, which is dependent on the configuration options used. -# It is advised to generate a default header using "doxygen -w html -# header.html footer.html stylesheet.css YourConfigFile" and then modify -# that header. Note that the header is subject to change so you typically -# have to redo this when upgrading to a newer version of doxygen or when -# changing the value of configuration settings such as GENERATE_TREEVIEW! - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If left blank doxygen will -# generate a default style sheet. Note that it is recommended to use -# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this -# tag will in the future become obsolete. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional -# user-defined cascading style sheet that is included after the standard -# style sheets created by doxygen. Using this option one can overrule -# certain style aspects. This is preferred over using HTML_STYLESHEET -# since it does not replace the standard style sheet and is therefor more -# robust against future updates. Doxygen will copy the style sheet file to -# the output directory. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that -# the files will be copied as-is; there are no commands or markers available. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. -# Doxygen will adjust the colors in the style sheet and background images -# according to this color. Hue is specified as an angle on a colorwheel, -# see http://en.wikipedia.org/wiki/Hue for more information. -# For instance the value 0 represents red, 60 is yellow, 120 is green, -# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. -# The allowed range is 0 to 359. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of -# the colors in the HTML output. For a value of 0 the output will use -# grayscales only. A value of 255 will produce the most vivid colors. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to -# the luminance component of the colors in the HTML output. Values below -# 100 gradually make the output lighter, whereas values above 100 make -# the output darker. 
The value divided by 100 is the actual gamma applied, -# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, -# and 100 does not change the gamma. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting -# this to NO can help when comparing the output of multiple runs. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of -# entries shown in the various tree structured indices initially; the user -# can expand and collapse entries dynamically later on. Doxygen will expand -# the tree to such a level that at most the specified number of entries are -# visible (unless a fully collapsed tree already exceeds this amount). -# So setting the number of entries 1 will produce a full collapsed tree by -# default. 0 is a special value representing an infinite number of entries -# and will result in a full expanded tree by default. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OSX 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. -# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. - -GENERATE_DOCSET = NO - -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely -# identify the documentation publisher. This should be a reverse domain-name -# style string, e.g. com.mycompany.MyDocSet.documentation. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be -# written to the html output directory. 
- -CHM_FILE = - -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. - -HHC_LOCATION = - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. - -CHM_INDEX_ENCODING = - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated -# that can be used as input for Qt's qhelpgenerator to generate a -# Qt Compressed Help (.qch) of the generated HTML documentation. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can -# be used to specify the file name of the resulting .qch file. -# The path specified is relative to the HTML output folder. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#namespace - -QHP_NAMESPACE = - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#virtual-folders - -QHP_VIRTUAL_FOLDER = doc - -# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to -# add. For more information please see -# http://doc.trolltech.com/qthelpproject.html#custom-filters - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see -# -# Qt Help Project / Custom Filters. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's -# filter section matches. -# -# Qt Help Project / Filter Attributes. - -QHP_SECT_FILTER_ATTRS = - -# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can -# be used to specify the location of Qt's qhelpgenerator. -# If non-empty doxygen will try to run qhelpgenerator on the generated -# .qhp file. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files -# will be generated, which together with the HTML files, form an Eclipse help -# plugin. To install this plugin and make it available under the help contents -# menu in Eclipse, the contents of the directory containing the HTML and XML -# files needs to be copied into the plugins directory of eclipse. The name of -# the directory within the plugins directory should be the same as -# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before -# the help appears. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the eclipse help plugin. 
When installing the plugin -# the directory name containing the HTML and XML files should also have -# this name. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) -# at top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. Since the tabs have the same information as the -# navigation tree you can set this option to NO if you already set -# GENERATE_TREEVIEW to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to YES, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). -# Windows users are probably better off using the HTML help feature. -# Since the tree basically has the same information as the tab index you -# could consider to set DISABLE_INDEX to NO when enabling this option. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values -# (range [0,1..20]) that doxygen will group on one line in the generated HTML -# documentation. Note that a value of 0 will completely suppress the enum -# values from appearing in the overview section. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open -# links to external symbols imported via tag files in a separate window. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are -# not supported properly for IE 6.0, but are supported on all modern browsers. -# Note that when changing this option you need to delete any form_*.png files -# in the HTML output before the changes have effect. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax -# (see http://www.mathjax.org) which uses client side Javascript for the -# rendering instead of using prerendered bitmaps. Use this if you do not -# have LaTeX installed or if you want to formulas look prettier in the HTML -# output. When enabled you may also need to install MathJax separately and -# configure the path to it using the MATHJAX_RELPATH option. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and -# SVG. The default value is HTML-CSS, which is slower, but has the best -# compatibility. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the -# HTML output directory using the MATHJAX_RELPATH option. The destination -# directory should contain the MathJax.js script. 
For instance, if the mathjax -# directory is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to -# the MathJax Content Delivery Network so you can quickly see the result without -# installing MathJax. -# However, it is strongly recommended to install a local -# copy of MathJax from http://www.mathjax.org before deployment. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension -# names that should be enabled during MathJax rendering. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript -# pieces of code that will be used on startup of the MathJax code. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box -# for the HTML output. The underlying search engine uses javascript -# and DHTML and should work on any modern browser. Note that when using -# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets -# (GENERATE_DOCSET) there is already a search function so this one should -# typically be disabled. For large projects the javascript based search engine -# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. - -SEARCHENGINE = YES - -# When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. -# There are two flavours of web server based search depending on the -# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for -# searching and an index file used by the script. When EXTERNAL_SEARCH is -# enabled the indexing and searching needs to be provided by external tools. -# See the manual for details. - -SERVER_BASED_SEARCH = NO - -# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP -# script for searching. Instead the search results are written to an XML file -# which needs to be processed by an external indexer. Doxygen will invoke an -# external search engine pointed to by the SEARCHENGINE_URL option to obtain -# the search results. Doxygen ships with an example indexer (doxyindexer) and -# search engine (doxysearch.cgi) which are based on the open source search -# engine library Xapian. See the manual for configuration details. - -EXTERNAL_SEARCH = NO - -# The SEARCHENGINE_URL should point to a search engine hosted by a web server -# which will returned the search results when EXTERNAL_SEARCH is enabled. -# Doxygen ships with an example search engine (doxysearch) which is based on -# the open source search engine library Xapian. See the manual for configuration -# details. - -SEARCHENGINE_URL = - -# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed -# search data is written to a file for indexing by an external tool. With the -# SEARCHDATA_FILE tag the name of this file can be specified. - -SEARCHDATA_FILE = searchdata.xml - -# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the -# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is -# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple -# projects and redirect the results back to the right project. - -EXTERNAL_SEARCH_ID = - -# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen -# projects other than the one defined by this configuration file, but that are -# all added to the same external search index. 
Each project needs to have a -# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id -# of to a relative location where the documentation can be found. -# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... - -EXTRA_SEARCH_MAPPINGS = - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. If left blank `latex' will be used as the default command name. -# Note that when enabling USE_PDFLATEX this option is only used for -# generating bitmaps for formulas in the HTML output, but not in the -# Makefile that is written to the output directory. - -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. If left blank `makeindex' will be used as the -# default command name. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, letter, legal and -# executive. If left blank a4 will be used. - -PAPER_TYPE = a4wide - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for -# the generated latex document. The footer should contain everything after -# the last chapter. If it is left blank doxygen will generate a -# standard footer. Notice: only use this tag if you know what you are doing! - -LATEX_FOOTER = - -# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images -# or other source files which should be copied to the LaTeX output directory. -# Note that the files will be copied as-is; there are no commands or markers -# available. - -LATEX_EXTRA_FILES = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. 
This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) -# in the output. - -LATEX_HIDE_INDICES = NO - -# If LATEX_SOURCE_CODE is set to YES then doxygen will include -# source code with syntax highlighting in the LaTeX output. -# Note that which sources are shown also depends on other settings -# such as SOURCE_BROWSER. - -LATEX_SOURCE_CODE = NO - -# The LATEX_BIB_STYLE tag can be used to specify the style to use for the -# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See -# http://en.wikipedia.org/wiki/BibTeX for more info. - -LATEX_BIB_STYLE = plain - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load style sheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. 
- -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. - -GENERATE_XML = YES - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `xml' will be used as the default path. - -XML_OUTPUT = xml - -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_SCHEMA = - -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_DTD = - -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that -# enabling this will significantly increase the size of the XML output. - -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# configuration options related to the DOCBOOK output -#--------------------------------------------------------------------------- - -# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files -# that can be used to generate PDF. - -GENERATE_DOCBOOK = NO - -# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in -# front of it. If left blank docbook will be used as the default path. - -DOCBOOK_OUTPUT = docbook - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able -# to generate PDF and DVI output from the Perl module output. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. -# This is useful -# if you want to understand what is going on. -# On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller -# and Perl will parse it just the same. 
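Because this configuration sets GENERATE_XML = YES, the structure of the documented code is written to xml/index.xml (plus one XML file per compound, named by refid) under the doxygen output directory; the doxyxml package removed later in this patch is one consumer of that output. Independent of those generated bindings, a minimal sketch of reading the index with only the standard library could look like this (the xml_dir path used below is an assumption):

#!/usr/bin/env python
# list_compounds.py -- minimal sketch of walking doxygen's XML index
# (illustration only; the xml_dir path below is an assumption).
import os
import xml.etree.ElementTree as ET

def list_compounds(xml_dir):
    """Yield (kind, name) for every compound listed in index.xml."""
    tree = ET.parse(os.path.join(xml_dir, "index.xml"))
    for compound in tree.getroot().findall("compound"):
        yield compound.get("kind"), compound.findtext("name")

if __name__ == "__main__":
    for kind, name in list_compounds("build/docs/doxygen/xml"):
        print("%-10s %s" % (kind, name))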
- -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. -# This is useful so different doxyrules.make files included by the same -# Makefile don't overwrite each other's variables. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = YES - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_DEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# pointed to by INCLUDE_PATH will be searched when a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. - -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition that -# overrules the definition found in the source code. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all references to function-like macros -# that are alone on a line, have an all uppercase name, and do not end with a -# semicolon, because these will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES option can be used to specify one or more tagfiles. For each -# tag file the location of the external documentation should be added. The -# format of a tag file without this location is as follows: -# -# TAGFILES = file1 file2 ... 
-# Adding location for the tag files is done as follows: -# -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths -# or URLs. Note that each tag file must have a unique name (where the name does -# NOT include the path). If a tag file is not located in the directory in which -# doxygen is run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed -# in the related pages index. If set to NO, only the current project's -# pages will be listed. - -EXTERNAL_PAGES = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base -# or super classes. Setting the tag to NO turns the diagrams off. Note that -# this option also works with HAVE_DOT disabled, but it is recommended to -# install and use dot, since it yields more powerful graphs. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = NO - -# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is -# allowed to run in parallel. When set to 0 (the default) doxygen will -# base this on the number of processors available in the system. You can set it -# explicitly to a value larger than 0 to get control over the balance -# between CPU load and processing speed. - -DOT_NUM_THREADS = 0 - -# By default doxygen will use the Helvetica font for all dot files that -# doxygen generates. When you want a differently looking font you can specify -# the font name using DOT_FONTNAME. 
You need to make sure dot is able to find -# the font, which can be done by putting it in a standard location or by setting -# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the -# directory containing the font. - -DOT_FONTNAME = Helvetica - -# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. -# The default size is 10pt. - -DOT_FONTSIZE = 10 - -# By default doxygen will tell dot to use the Helvetica font. -# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to -# set the path where dot can find it. - -DOT_FONTPATH = - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for groups, showing the direct groups dependencies - -GROUP_GRAPHS = YES - -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. - -UML_LOOK = NO - -# If the UML_LOOK tag is enabled, the fields and methods are shown inside -# the class node. If there are many fields or methods and many nodes the -# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS -# threshold limits the number of items for each type to make the size more -# manageable. Set this to 0 for no limit. Note that the threshold may be -# exceeded by 50% before the limit is enforced. - -UML_LIMIT_NUM_FIELDS = 10 - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the CALL_GRAPH and HAVE_DOT options are set to YES then -# doxygen will generate a call dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable call graphs -# for selected functions only using the \callgraph command. - -CALL_GRAPH = NO - -# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then -# doxygen will generate a caller dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable caller -# graphs for selected functions only using the \callergraph command. 
- -CALLER_GRAPH = NO - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will generate a graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES -# then doxygen will show the dependencies a directory has on other directories -# in a graphical way. The dependency relations are determined by the #include -# relations between the files in the directories. - -DIRECTORY_GRAPH = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are svg, png, jpg, or gif. -# If left blank png will be used. If you choose svg you need to set -# HTML_FILE_EXTENSION to xhtml in order to make the SVG files -# visible in IE 9+ (other browsers do not have this requirement). - -DOT_IMAGE_FORMAT = png - -# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to -# enable generation of interactive SVG images that allow zooming and panning. -# Note that this requires a modern browser other than Internet Explorer. -# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you -# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files -# visible. Older versions of IE do not have SVG support. - -INTERACTIVE_SVG = NO - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MSCFILE_DIRS tag can be used to specify one or more directories that -# contain msc files that are included in the documentation (see the -# \mscfile command). - -MSCFILE_DIRS = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of -# nodes that will be shown in the graph. If the number of nodes in a graph -# becomes larger than this value, doxygen will truncate the graph, which is -# visualized by representing a node as a red box. Note that doxygen if the -# number of direct children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note -# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the -# graphs generated by dot. A depth value of 3 means that only nodes reachable -# from the root by following a path via at most 3 edges will be shown. Nodes -# that lay further from the root node will be omitted. Note that setting this -# option to 1 or 2 may greatly reduce the computation time needed for large -# code bases. Also note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. - -MAX_DOT_GRAPH_DEPTH = 0 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not -# seem to support this out of the box. Warning: Depending on the platform used, -# enabling this option may lead to badly anti-aliased labels on the edges of -# a graph (i.e. they become hard to read). - -DOT_TRANSPARENT = NO - -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). 
This -# makes dot run faster, but since only newer versions of dot (>1.8.10) -# support this, this feature is disabled by default. - -DOT_MULTI_TARGETS = YES - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES diff --git a/docs/doxygen/doxyxml/__init__.py b/docs/doxygen/doxyxml/__init__.py deleted file mode 100644 index 6df88dc..0000000 --- a/docs/doxygen/doxyxml/__init__.py +++ /dev/null @@ -1,79 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' - -""" -Python interface to contents of doxygen xml documentation. - -Example use: -See the contents of the example folder for the C++ and -doxygen-generated xml used in this example. - ->>> # Parse the doxygen docs. ->>> import os ->>> this_dir = os.path.dirname(globals()['__file__']) ->>> xml_path = this_dir + "/example/xml/" ->>> di = DoxyIndex(xml_path) - -Get a list of all top-level objects. - ->>> print([mem.name() for mem in di.members()]) -[u'Aadvark', u'aadvarky_enough', u'main'] - -Get all functions. - ->>> print([mem.name() for mem in di.in_category(DoxyFunction)]) -[u'aadvarky_enough', u'main'] - -Check if an object is present. - ->>> di.has_member(u'Aadvark') -True ->>> di.has_member(u'Fish') -False - -Get an item by name and check its properties. - ->>> aad = di.get_member(u'Aadvark') ->>> print(aad.brief_description) -Models the mammal Aadvark. ->>> print(aad.detailed_description) -Sadly the model is incomplete and cannot capture all aspects of an aadvark yet. - -This line is uninformative and is only to test line breaks in the comments. ->>> [mem.name() for mem in aad.members()] -[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness'] ->>> aad.get_member(u'print').brief_description -u'Outputs the vital aadvark statistics.' 
- -""" - -from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther - -def _test(): - import os - this_dir = os.path.dirname(globals()['__file__']) - xml_path = this_dir + "/example/xml/" - di = DoxyIndex(xml_path) - # Get the Aadvark class - aad = di.get_member('Aadvark') - aad.brief_description - import doctest - return doctest.testmod() - -if __name__ == "__main__": - _test() - diff --git a/docs/doxygen/doxyxml/base.py b/docs/doxygen/doxyxml/base.py deleted file mode 100644 index 383dc26..0000000 --- a/docs/doxygen/doxyxml/base.py +++ /dev/null @@ -1,216 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' - -""" -A base class is created. - -Classes based upon this are used to make more user-friendly interfaces -to the doxygen xml docs than the generated classes provide. -""" - -import os -import pdb - -from xml.parsers.expat import ExpatError - -from generated import compound - - -class Base(object): - - class Duplicate(StandardError): - pass - - class NoSuchMember(StandardError): - pass - - class ParsingError(StandardError): - pass - - def __init__(self, parse_data, top=None): - self._parsed = False - self._error = False - self._parse_data = parse_data - self._members = [] - self._dict_members = {} - self._in_category = {} - self._data = {} - if top is not None: - self._xml_path = top._xml_path - # Set up holder of references - else: - top = self - self._refs = {} - self._xml_path = parse_data - self.top = top - - @classmethod - def from_refid(cls, refid, top=None): - """ Instantiate class from a refid rather than parsing object. """ - # First check to see if its already been instantiated. - if top is not None and refid in top._refs: - return top._refs[refid] - # Otherwise create a new instance and set refid. - inst = cls(None, top=top) - inst.refid = refid - inst.add_ref(inst) - return inst - - @classmethod - def from_parse_data(cls, parse_data, top=None): - refid = getattr(parse_data, 'refid', None) - if refid is not None and top is not None and refid in top._refs: - return top._refs[refid] - inst = cls(parse_data, top=top) - if refid is not None: - inst.refid = refid - inst.add_ref(inst) - return inst - - def add_ref(self, obj): - if hasattr(obj, 'refid'): - self.top._refs[obj.refid] = obj - - mem_classes = [] - - def get_cls(self, mem): - for cls in self.mem_classes: - if cls.can_parse(mem): - return cls - raise StandardError(("Did not find a class for object '%s'." 
\ - % (mem.get_name()))) - - def convert_mem(self, mem): - try: - cls = self.get_cls(mem) - converted = cls.from_parse_data(mem, self.top) - if converted is None: - raise StandardError('No class matched this object.') - self.add_ref(converted) - return converted - except StandardError, e: - print e - - @classmethod - def includes(cls, inst): - return isinstance(inst, cls) - - @classmethod - def can_parse(cls, obj): - return False - - def _parse(self): - self._parsed = True - - def _get_dict_members(self, cat=None): - """ - For given category a dictionary is returned mapping member names to - members of that category. For names that are duplicated the name is - mapped to None. - """ - self.confirm_no_error() - if cat not in self._dict_members: - new_dict = {} - for mem in self.in_category(cat): - if mem.name() not in new_dict: - new_dict[mem.name()] = mem - else: - new_dict[mem.name()] = self.Duplicate - self._dict_members[cat] = new_dict - return self._dict_members[cat] - - def in_category(self, cat): - self.confirm_no_error() - if cat is None: - return self._members - if cat not in self._in_category: - self._in_category[cat] = [mem for mem in self._members - if cat.includes(mem)] - return self._in_category[cat] - - def get_member(self, name, cat=None): - self.confirm_no_error() - # Check if it's in a namespace or class. - bits = name.split('::') - first = bits[0] - rest = '::'.join(bits[1:]) - member = self._get_dict_members(cat).get(first, self.NoSuchMember) - # Raise any errors that are returned. - if member in set([self.NoSuchMember, self.Duplicate]): - raise member() - if rest: - return member.get_member(rest, cat=cat) - return member - - def has_member(self, name, cat=None): - try: - mem = self.get_member(name, cat=cat) - return True - except self.NoSuchMember: - return False - - def data(self): - self.confirm_no_error() - return self._data - - def members(self): - self.confirm_no_error() - return self._members - - def process_memberdefs(self): - mdtss = [] - for sec in self._retrieved_data.compounddef.sectiondef: - mdtss += sec.memberdef - # At the moment we lose all information associated with sections. - # Sometimes a memberdef is in several sectiondef. - # We make sure we don't get duplicates here. - uniques = set([]) - for mem in mdtss: - converted = self.convert_mem(mem) - pair = (mem.name, mem.__class__) - if pair not in uniques: - uniques.add(pair) - self._members.append(converted) - - def retrieve_data(self): - filename = os.path.join(self._xml_path, self.refid + '.xml') - try: - self._retrieved_data = compound.parse(filename) - except ExpatError: - print('Error in xml in file %s' % filename) - self._error = True - self._retrieved_data = None - - def check_parsed(self): - if not self._parsed: - self._parse() - - def confirm_no_error(self): - self.check_parsed() - if self._error: - raise self.ParsingError() - - def error(self): - self.check_parsed() - return self._error - - def name(self): - # first see if we can do it without processing. 
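# Illustrative note on get_member(): qualified names are split on '::' and resolved
# one level at a time, so a nested lookup recurses through namespace and class scopes.
# Assuming `di` is a parsed DoxyIndex and the names below exist in the generated XML
# (they are hypothetical here):
#
#     blk = di.get_member('gr::nordic::nordic_rx')   # namespace, then class
#     fn  = blk.get_member('make')                    # then a member of that class
#
# A name that is absent raises NoSuchMember; a name that is ambiguous within its
# category raises Duplicate (both defined on Base above).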
- if self._parse_data is not None: - return self._parse_data.name - self.check_parsed() - return self._retrieved_data.compounddef.name diff --git a/docs/doxygen/doxyxml/doxyindex.py b/docs/doxygen/doxyxml/doxyindex.py deleted file mode 100644 index 8fed1b4..0000000 --- a/docs/doxygen/doxyxml/doxyindex.py +++ /dev/null @@ -1,234 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' - -""" -Classes providing more user-friendly interfaces to the doxygen xml -docs than the generated classes provide. -""" - -import os - -from generated import index -from base import Base -from text import description - -class DoxyIndex(Base): - """ - Parses a doxygen xml directory. - """ - - __module__ = "gnuradio.utils.doxyxml" - - def _parse(self): - if self._parsed: - return - super(DoxyIndex, self)._parse() - self._root = index.parse(os.path.join(self._xml_path, 'index.xml')) - for mem in self._root.compound: - converted = self.convert_mem(mem) - # For files we want the contents to be accessible directly - # from the parent rather than having to go through the file - # object. - if self.get_cls(mem) == DoxyFile: - if mem.name.endswith('.h'): - self._members += converted.members() - self._members.append(converted) - else: - self._members.append(converted) - - -def generate_swig_doc_i(self): - """ - %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state " - Wraps the C++: gr_align_on_samplenumbers_ss::align_state"; - """ - pass - - -class DoxyCompMem(Base): - - - kind = None - - def __init__(self, *args, **kwargs): - super(DoxyCompMem, self).__init__(*args, **kwargs) - - @classmethod - def can_parse(cls, obj): - return obj.kind == cls.kind - - def set_descriptions(self, parse_data): - bd = description(getattr(parse_data, 'briefdescription', None)) - dd = description(getattr(parse_data, 'detaileddescription', None)) - self._data['brief_description'] = bd - self._data['detailed_description'] = dd - -class DoxyCompound(DoxyCompMem): - pass - -class DoxyMember(DoxyCompMem): - pass - - -class DoxyFunction(DoxyMember): - - __module__ = "gnuradio.utils.doxyxml" - - kind = 'function' - - def _parse(self): - if self._parsed: - return - super(DoxyFunction, self)._parse() - self.set_descriptions(self._parse_data) - self._data['params'] = [] - prms = self._parse_data.param - for prm in prms: - self._data['params'].append(DoxyParam(prm)) - - brief_description = property(lambda self: self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) - params = property(lambda self: self.data()['params']) - -Base.mem_classes.append(DoxyFunction) - - -class DoxyParam(DoxyMember): - - __module__ = "gnuradio.utils.doxyxml" - - def _parse(self): - if self._parsed: - return - super(DoxyParam, self)._parse() - self.set_descriptions(self._parse_data) - self._data['declname'] = self._parse_data.declname - - brief_description = property(lambda self: 
self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) - declname = property(lambda self: self.data()['declname']) - -class DoxyClass(DoxyCompound): - - __module__ = "gnuradio.utils.doxyxml" - - kind = 'class' - - def _parse(self): - if self._parsed: - return - super(DoxyClass, self)._parse() - self.retrieve_data() - if self._error: - return - self.set_descriptions(self._retrieved_data.compounddef) - # Sectiondef.kind tells about whether private or public. - # We just ignore this for now. - self.process_memberdefs() - - brief_description = property(lambda self: self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) - -Base.mem_classes.append(DoxyClass) - - -class DoxyFile(DoxyCompound): - - __module__ = "gnuradio.utils.doxyxml" - - kind = 'file' - - def _parse(self): - if self._parsed: - return - super(DoxyFile, self)._parse() - self.retrieve_data() - self.set_descriptions(self._retrieved_data.compounddef) - if self._error: - return - self.process_memberdefs() - - brief_description = property(lambda self: self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) - -Base.mem_classes.append(DoxyFile) - - -class DoxyNamespace(DoxyCompound): - - __module__ = "gnuradio.utils.doxyxml" - - kind = 'namespace' - -Base.mem_classes.append(DoxyNamespace) - - -class DoxyGroup(DoxyCompound): - - __module__ = "gnuradio.utils.doxyxml" - - kind = 'group' - - def _parse(self): - if self._parsed: - return - super(DoxyGroup, self)._parse() - self.retrieve_data() - if self._error: - return - cdef = self._retrieved_data.compounddef - self._data['title'] = description(cdef.title) - # Process inner groups - grps = cdef.innergroup - for grp in grps: - converted = DoxyGroup.from_refid(grp.refid, top=self.top) - self._members.append(converted) - # Process inner classes - klasses = cdef.innerclass - for kls in klasses: - converted = DoxyClass.from_refid(kls.refid, top=self.top) - self._members.append(converted) - # Process normal members - self.process_memberdefs() - - title = property(lambda self: self.data()['title']) - - -Base.mem_classes.append(DoxyGroup) - - -class DoxyFriend(DoxyMember): - - __module__ = "gnuradio.utils.doxyxml" - - kind = 'friend' - -Base.mem_classes.append(DoxyFriend) - - -class DoxyOther(Base): - - __module__ = "gnuradio.utils.doxyxml" - - kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page']) - - @classmethod - def can_parse(cls, obj): - return obj.kind in cls.kinds - -Base.mem_classes.append(DoxyOther) - diff --git a/docs/doxygen/doxyxml/generated/__init__.py b/docs/doxygen/doxyxml/generated/__init__.py deleted file mode 100644 index 3982397..0000000 --- a/docs/doxygen/doxyxml/generated/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -""" -Contains generated files produced by generateDS.py. - -These do the real work of parsing the doxygen xml files but the -resultant classes are not very friendly to navigate so the rest of the -doxyxml module processes them further. -""" diff --git a/docs/doxygen/doxyxml/generated/compound.py b/docs/doxygen/doxyxml/generated/compound.py deleted file mode 100644 index 1522ac2..0000000 --- a/docs/doxygen/doxyxml/generated/compound.py +++ /dev/null @@ -1,503 +0,0 @@ -#!/usr/bin/env python - -""" -Generated Mon Feb 9 19:08:05 2009 by generateDS.py. 
-""" - -from string import lower as str_lower -from xml.dom import minidom -from xml.dom import Node - -import sys - -import compoundsuper as supermod -from compoundsuper import MixedContainer - - -class DoxygenTypeSub(supermod.DoxygenType): - def __init__(self, version=None, compounddef=None): - supermod.DoxygenType.__init__(self, version, compounddef) - - def find(self, details): - - return self.compounddef.find(details) - -supermod.DoxygenType.subclass = DoxygenTypeSub -# end class DoxygenTypeSub - - -class compounddefTypeSub(supermod.compounddefType): - def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): - supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers) - - def find(self, details): - - if self.id == details.refid: - return self - - for sectiondef in self.sectiondef: - result = sectiondef.find(details) - if result: - return result - - -supermod.compounddefType.subclass = compounddefTypeSub -# end class compounddefTypeSub - - -class listofallmembersTypeSub(supermod.listofallmembersType): - def __init__(self, member=None): - supermod.listofallmembersType.__init__(self, member) -supermod.listofallmembersType.subclass = listofallmembersTypeSub -# end class listofallmembersTypeSub - - -class memberRefTypeSub(supermod.memberRefType): - def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''): - supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name) -supermod.memberRefType.subclass = memberRefTypeSub -# end class memberRefTypeSub - - -class compoundRefTypeSub(supermod.compoundRefType): - def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - supermod.compoundRefType.__init__(self, mixedclass_, content_) -supermod.compoundRefType.subclass = compoundRefTypeSub -# end class compoundRefTypeSub - - -class reimplementTypeSub(supermod.reimplementType): - def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): - supermod.reimplementType.__init__(self, mixedclass_, content_) -supermod.reimplementType.subclass = reimplementTypeSub -# end class reimplementTypeSub - - -class incTypeSub(supermod.incType): - def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - supermod.incType.__init__(self, mixedclass_, content_) -supermod.incType.subclass = incTypeSub -# end class incTypeSub - - -class refTypeSub(supermod.refType): - def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - supermod.refType.__init__(self, mixedclass_, content_) -supermod.refType.subclass = refTypeSub -# end class refTypeSub - - - -class refTextTypeSub(supermod.refTextType): - def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, 
content_=None): - supermod.refTextType.__init__(self, mixedclass_, content_) - -supermod.refTextType.subclass = refTextTypeSub -# end class refTextTypeSub - -class sectiondefTypeSub(supermod.sectiondefType): - - - def __init__(self, kind=None, header='', description=None, memberdef=None): - supermod.sectiondefType.__init__(self, kind, header, description, memberdef) - - def find(self, details): - - for memberdef in self.memberdef: - if memberdef.id == details.refid: - return memberdef - - return None - - -supermod.sectiondefType.subclass = sectiondefTypeSub -# end class sectiondefTypeSub - - -class memberdefTypeSub(supermod.memberdefType): - def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): - supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby) -supermod.memberdefType.subclass = memberdefTypeSub -# end class memberdefTypeSub - - -class descriptionTypeSub(supermod.descriptionType): - def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None): - supermod.descriptionType.__init__(self, mixedclass_, content_) -supermod.descriptionType.subclass = descriptionTypeSub -# end class descriptionTypeSub - - -class enumvalueTypeSub(supermod.enumvalueType): - def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): - supermod.enumvalueType.__init__(self, mixedclass_, content_) -supermod.enumvalueType.subclass = enumvalueTypeSub -# end class enumvalueTypeSub - - -class templateparamlistTypeSub(supermod.templateparamlistType): - def __init__(self, param=None): - supermod.templateparamlistType.__init__(self, param) -supermod.templateparamlistType.subclass = templateparamlistTypeSub -# end class templateparamlistTypeSub - - -class paramTypeSub(supermod.paramType): - def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None): - supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription) -supermod.paramType.subclass = paramTypeSub -# end class paramTypeSub - - -class linkedTextTypeSub(supermod.linkedTextType): - def __init__(self, ref=None, mixedclass_=None, content_=None): - supermod.linkedTextType.__init__(self, mixedclass_, content_) -supermod.linkedTextType.subclass = linkedTextTypeSub -# end class linkedTextTypeSub - - -class graphTypeSub(supermod.graphType): - def __init__(self, node=None): - supermod.graphType.__init__(self, node) -supermod.graphType.subclass = graphTypeSub -# end class graphTypeSub - - -class nodeTypeSub(supermod.nodeType): - 
def __init__(self, id=None, label='', link=None, childnode=None): - supermod.nodeType.__init__(self, id, label, link, childnode) -supermod.nodeType.subclass = nodeTypeSub -# end class nodeTypeSub - - -class childnodeTypeSub(supermod.childnodeType): - def __init__(self, relation=None, refid=None, edgelabel=None): - supermod.childnodeType.__init__(self, relation, refid, edgelabel) -supermod.childnodeType.subclass = childnodeTypeSub -# end class childnodeTypeSub - - -class linkTypeSub(supermod.linkType): - def __init__(self, refid=None, external=None, valueOf_=''): - supermod.linkType.__init__(self, refid, external) -supermod.linkType.subclass = linkTypeSub -# end class linkTypeSub - - -class listingTypeSub(supermod.listingType): - def __init__(self, codeline=None): - supermod.listingType.__init__(self, codeline) -supermod.listingType.subclass = listingTypeSub -# end class listingTypeSub - - -class codelineTypeSub(supermod.codelineType): - def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): - supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight) -supermod.codelineType.subclass = codelineTypeSub -# end class codelineTypeSub - - -class highlightTypeSub(supermod.highlightType): - def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None): - supermod.highlightType.__init__(self, mixedclass_, content_) -supermod.highlightType.subclass = highlightTypeSub -# end class highlightTypeSub - - -class referenceTypeSub(supermod.referenceType): - def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): - supermod.referenceType.__init__(self, mixedclass_, content_) -supermod.referenceType.subclass = referenceTypeSub -# end class referenceTypeSub - - -class locationTypeSub(supermod.locationType): - def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): - supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file) -supermod.locationType.subclass = locationTypeSub -# end class locationTypeSub - - -class docSect1TypeSub(supermod.docSect1Type): - def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None): - supermod.docSect1Type.__init__(self, mixedclass_, content_) -supermod.docSect1Type.subclass = docSect1TypeSub -# end class docSect1TypeSub - - -class docSect2TypeSub(supermod.docSect2Type): - def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None): - supermod.docSect2Type.__init__(self, mixedclass_, content_) -supermod.docSect2Type.subclass = docSect2TypeSub -# end class docSect2TypeSub - - -class docSect3TypeSub(supermod.docSect3Type): - def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None): - supermod.docSect3Type.__init__(self, mixedclass_, content_) -supermod.docSect3Type.subclass = docSect3TypeSub -# end class docSect3TypeSub - - -class docSect4TypeSub(supermod.docSect4Type): - def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None): - supermod.docSect4Type.__init__(self, mixedclass_, content_) -supermod.docSect4Type.subclass = docSect4TypeSub -# end class docSect4TypeSub - - -class docInternalTypeSub(supermod.docInternalType): - def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): - supermod.docInternalType.__init__(self, mixedclass_, content_) 
-supermod.docInternalType.subclass = docInternalTypeSub -# end class docInternalTypeSub - - -class docInternalS1TypeSub(supermod.docInternalS1Type): - def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): - supermod.docInternalS1Type.__init__(self, mixedclass_, content_) -supermod.docInternalS1Type.subclass = docInternalS1TypeSub -# end class docInternalS1TypeSub - - -class docInternalS2TypeSub(supermod.docInternalS2Type): - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - supermod.docInternalS2Type.__init__(self, mixedclass_, content_) -supermod.docInternalS2Type.subclass = docInternalS2TypeSub -# end class docInternalS2TypeSub - - -class docInternalS3TypeSub(supermod.docInternalS3Type): - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - supermod.docInternalS3Type.__init__(self, mixedclass_, content_) -supermod.docInternalS3Type.subclass = docInternalS3TypeSub -# end class docInternalS3TypeSub - - -class docInternalS4TypeSub(supermod.docInternalS4Type): - def __init__(self, para=None, mixedclass_=None, content_=None): - supermod.docInternalS4Type.__init__(self, mixedclass_, content_) -supermod.docInternalS4Type.subclass = docInternalS4TypeSub -# end class docInternalS4TypeSub - - -class docURLLinkSub(supermod.docURLLink): - def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docURLLink.__init__(self, mixedclass_, content_) -supermod.docURLLink.subclass = docURLLinkSub -# end class docURLLinkSub - - -class docAnchorTypeSub(supermod.docAnchorType): - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docAnchorType.__init__(self, mixedclass_, content_) -supermod.docAnchorType.subclass = docAnchorTypeSub -# end class docAnchorTypeSub - - -class docFormulaTypeSub(supermod.docFormulaType): - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docFormulaType.__init__(self, mixedclass_, content_) -supermod.docFormulaType.subclass = docFormulaTypeSub -# end class docFormulaTypeSub - - -class docIndexEntryTypeSub(supermod.docIndexEntryType): - def __init__(self, primaryie='', secondaryie=''): - supermod.docIndexEntryType.__init__(self, primaryie, secondaryie) -supermod.docIndexEntryType.subclass = docIndexEntryTypeSub -# end class docIndexEntryTypeSub - - -class docListTypeSub(supermod.docListType): - def __init__(self, listitem=None): - supermod.docListType.__init__(self, listitem) -supermod.docListType.subclass = docListTypeSub -# end class docListTypeSub - - -class docListItemTypeSub(supermod.docListItemType): - def __init__(self, para=None): - supermod.docListItemType.__init__(self, para) -supermod.docListItemType.subclass = docListItemTypeSub -# end class docListItemTypeSub - - -class docSimpleSectTypeSub(supermod.docSimpleSectType): - def __init__(self, kind=None, title=None, para=None): - supermod.docSimpleSectType.__init__(self, kind, title, para) -supermod.docSimpleSectType.subclass = docSimpleSectTypeSub -# end class docSimpleSectTypeSub - - -class docVarListEntryTypeSub(supermod.docVarListEntryType): - def __init__(self, term=None): - supermod.docVarListEntryType.__init__(self, term) -supermod.docVarListEntryType.subclass = docVarListEntryTypeSub -# end class docVarListEntryTypeSub - - -class docRefTextTypeSub(supermod.docRefTextType): - def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docRefTextType.__init__(self, mixedclass_, content_) 
-supermod.docRefTextType.subclass = docRefTextTypeSub -# end class docRefTextTypeSub - - -class docTableTypeSub(supermod.docTableType): - def __init__(self, rows=None, cols=None, row=None, caption=None): - supermod.docTableType.__init__(self, rows, cols, row, caption) -supermod.docTableType.subclass = docTableTypeSub -# end class docTableTypeSub - - -class docRowTypeSub(supermod.docRowType): - def __init__(self, entry=None): - supermod.docRowType.__init__(self, entry) -supermod.docRowType.subclass = docRowTypeSub -# end class docRowTypeSub - - -class docEntryTypeSub(supermod.docEntryType): - def __init__(self, thead=None, para=None): - supermod.docEntryType.__init__(self, thead, para) -supermod.docEntryType.subclass = docEntryTypeSub -# end class docEntryTypeSub - - -class docHeadingTypeSub(supermod.docHeadingType): - def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docHeadingType.__init__(self, mixedclass_, content_) -supermod.docHeadingType.subclass = docHeadingTypeSub -# end class docHeadingTypeSub - - -class docImageTypeSub(supermod.docImageType): - def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docImageType.__init__(self, mixedclass_, content_) -supermod.docImageType.subclass = docImageTypeSub -# end class docImageTypeSub - - -class docDotFileTypeSub(supermod.docDotFileType): - def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docDotFileType.__init__(self, mixedclass_, content_) -supermod.docDotFileType.subclass = docDotFileTypeSub -# end class docDotFileTypeSub - - -class docTocItemTypeSub(supermod.docTocItemType): - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - supermod.docTocItemType.__init__(self, mixedclass_, content_) -supermod.docTocItemType.subclass = docTocItemTypeSub -# end class docTocItemTypeSub - - -class docTocListTypeSub(supermod.docTocListType): - def __init__(self, tocitem=None): - supermod.docTocListType.__init__(self, tocitem) -supermod.docTocListType.subclass = docTocListTypeSub -# end class docTocListTypeSub - - -class docLanguageTypeSub(supermod.docLanguageType): - def __init__(self, langid=None, para=None): - supermod.docLanguageType.__init__(self, langid, para) -supermod.docLanguageType.subclass = docLanguageTypeSub -# end class docLanguageTypeSub - - -class docParamListTypeSub(supermod.docParamListType): - def __init__(self, kind=None, parameteritem=None): - supermod.docParamListType.__init__(self, kind, parameteritem) -supermod.docParamListType.subclass = docParamListTypeSub -# end class docParamListTypeSub - - -class docParamListItemSub(supermod.docParamListItem): - def __init__(self, parameternamelist=None, parameterdescription=None): - supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription) -supermod.docParamListItem.subclass = docParamListItemSub -# end class docParamListItemSub - - -class docParamNameListSub(supermod.docParamNameList): - def __init__(self, parametername=None): - supermod.docParamNameList.__init__(self, parametername) -supermod.docParamNameList.subclass = docParamNameListSub -# end class docParamNameListSub - - -class docParamNameSub(supermod.docParamName): - def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): - supermod.docParamName.__init__(self, mixedclass_, content_) -supermod.docParamName.subclass = docParamNameSub -# end class docParamNameSub - - -class 
docXRefSectTypeSub(supermod.docXRefSectType): - def __init__(self, id=None, xreftitle=None, xrefdescription=None): - supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription) -supermod.docXRefSectType.subclass = docXRefSectTypeSub -# end class docXRefSectTypeSub - - -class docCopyTypeSub(supermod.docCopyType): - def __init__(self, link=None, para=None, sect1=None, internal=None): - supermod.docCopyType.__init__(self, link, para, sect1, internal) -supermod.docCopyType.subclass = docCopyTypeSub -# end class docCopyTypeSub - - -class docCharTypeSub(supermod.docCharType): - def __init__(self, char=None, valueOf_=''): - supermod.docCharType.__init__(self, char) -supermod.docCharType.subclass = docCharTypeSub -# end class docCharTypeSub - -class docParaTypeSub(supermod.docParaType): - def __init__(self, char=None, valueOf_=''): - supermod.docParaType.__init__(self, char) - - self.parameterlist = [] - self.simplesects = [] - self.content = [] - - def buildChildren(self, child_, nodeName_): - supermod.docParaType.buildChildren(self, child_, nodeName_) - - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == "ref": - obj_ = supermod.docRefTextType.factory() - obj_.build(child_) - self.content.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameterlist': - obj_ = supermod.docParamListType.factory() - obj_.build(child_) - self.parameterlist.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'simplesect': - obj_ = supermod.docSimpleSectType.factory() - obj_.build(child_) - self.simplesects.append(obj_) - - -supermod.docParaType.subclass = docParaTypeSub -# end class docParaTypeSub - - - -def parse(inFilename): - doc = minidom.parse(inFilename) - rootNode = doc.documentElement - rootObj = supermod.DoxygenType.factory() - rootObj.build(rootNode) - return rootObj - - diff --git a/docs/doxygen/doxyxml/generated/compoundsuper.py b/docs/doxygen/doxyxml/generated/compoundsuper.py deleted file mode 100644 index 6255dda..0000000 --- a/docs/doxygen/doxyxml/generated/compoundsuper.py +++ /dev/null @@ -1,8342 +0,0 @@ -#!/usr/bin/env python - -# -# Generated Thu Jun 11 18:44:25 2009 by generateDS.py. -# - -import sys -import getopt -from string import lower as str_lower -from xml.dom import minidom -from xml.dom import Node - -# -# User methods -# -# Calls to the methods in these classes are generated by generateDS.py. -# You can replace these methods by re-implementing the following class -# in a module named generatedssuper.py. - -try: - from generatedssuper import GeneratedsSuper -except ImportError, exp: - - class GeneratedsSuper: - def format_string(self, input_data, input_name=''): - return input_data - def format_integer(self, input_data, input_name=''): - return '%d' % input_data - def format_float(self, input_data, input_name=''): - return '%f' % input_data - def format_double(self, input_data, input_name=''): - return '%e' % input_data - def format_boolean(self, input_data, input_name=''): - return '%s' % input_data - - -# -# If you have installed IPython you can uncomment and use the following. -# IPython is available from http://ipython.scipy.org/. 
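The try/except import of GeneratedsSuper near the top of this generated module is the customisation hook generateDS.py provides: if a module named generatedssuper.py is importable, its class replaces the inline fallback defined above. A minimal sketch of such an override; the module name is fixed by the hook, and the format_string behaviour shown is purely illustrative:

# generatedssuper.py (sketch) -- imported in place of the fallback GeneratedsSuper above.
class GeneratedsSuper(object):
    def format_string(self, input_data, input_name=''):
        # Illustrative tweak: collapse runs of whitespace before the value is written out.
        return ' '.join(input_data.split())
    def format_integer(self, input_data, input_name=''):
        return '%d' % input_data
    def format_float(self, input_data, input_name=''):
        return '%f' % input_data
    def format_double(self, input_data, input_name=''):
        return '%e' % input_data
    def format_boolean(self, input_data, input_name=''):
        return '%s' % input_data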
-# - -## from IPython.Shell import IPShellEmbed -## args = '' -## ipshell = IPShellEmbed(args, -## banner = 'Dropping into IPython', -## exit_msg = 'Leaving Interpreter, back to program.') - -# Then use the following line where and when you want to drop into the -# IPython shell: -# ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit') - -# -# Globals -# - -ExternalEncoding = 'ascii' - -# -# Support/utility functions. -# - -def showIndent(outfile, level): - for idx in range(level): - outfile.write(' ') - -def quote_xml(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') - s1 = s1.replace('<', '<') - s1 = s1.replace('>', '>') - return s1 - -def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') - s1 = s1.replace('<', '<') - s1 = s1.replace('>', '>') - if '"' in s1: - if "'" in s1: - s1 = '"%s"' % s1.replace('"', """) - else: - s1 = "'%s'" % s1 - else: - s1 = '"%s"' % s1 - return s1 - -def quote_python(inStr): - s1 = inStr - if s1.find("'") == -1: - if s1.find('\n') == -1: - return "'%s'" % s1 - else: - return "'''%s'''" % s1 - else: - if s1.find('"') != -1: - s1 = s1.replace('"', '\\"') - if s1.find('\n') == -1: - return '"%s"' % s1 - else: - return '"""%s"""' % s1 - - -class MixedContainer: - # Constants for category: - CategoryNone = 0 - CategoryText = 1 - CategorySimple = 2 - CategoryComplex = 3 - # Constants for content_type: - TypeNone = 0 - TypeText = 1 - TypeString = 2 - TypeInteger = 3 - TypeFloat = 4 - TypeDecimal = 5 - TypeDouble = 6 - TypeBoolean = 7 - def __init__(self, category, content_type, name, value): - self.category = category - self.content_type = content_type - self.name = name - self.value = value - def getCategory(self): - return self.category - def getContenttype(self, content_type): - return self.content_type - def getValue(self): - return self.value - def getName(self): - return self.name - def export(self, outfile, level, name, namespace): - if self.category == MixedContainer.CategoryText: - outfile.write(self.value) - elif self.category == MixedContainer.CategorySimple: - self.exportSimple(outfile, level, name) - else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace,name) - def exportSimple(self, outfile, level, name): - if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) - elif self.content_type == MixedContainer.TypeInteger or \ - self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) - elif self.content_type == MixedContainer.TypeFloat or \ - self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) - elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) - def exportLiteral(self, outfile, level, name): - if self.category == MixedContainer.CategoryText: - showIndent(outfile, level) - outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) - elif self.category == MixedContainer.CategorySimple: - showIndent(outfile, level) - outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) - else: # category == MixedContainer.CategoryComplex - showIndent(outfile, level) - outfile.write('MixedContainer(%d, %d, "%s",\n' % \ - (self.category, self.content_type, 
self.name,)) - self.value.exportLiteral(outfile, level + 1) - showIndent(outfile, level) - outfile.write(')\n') - - -class _MemberSpec(object): - def __init__(self, name='', data_type='', container=0): - self.name = name - self.data_type = data_type - self.container = container - def set_name(self, name): self.name = name - def get_name(self): return self.name - def set_data_type(self, data_type): self.data_type = data_type - def get_data_type(self): return self.data_type - def set_container(self, container): self.container = container - def get_container(self): return self.container - - -# -# Data representation classes. -# - -class DoxygenType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, version=None, compounddef=None): - self.version = version - self.compounddef = compounddef - def factory(*args_, **kwargs_): - if DoxygenType.subclass: - return DoxygenType.subclass(*args_, **kwargs_) - else: - return DoxygenType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_compounddef(self): return self.compounddef - def set_compounddef(self, compounddef): self.compounddef = compounddef - def get_version(self): return self.version - def set_version(self, version): self.version = version - def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): - outfile.write(' version=%s' % (quote_attrib(self.version), )) - def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): - if self.compounddef: - self.compounddef.export(outfile, level, namespace_, name_='compounddef') - def hasContent_(self): - if ( - self.compounddef is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='DoxygenType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.version is not None: - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.compounddef: - showIndent(outfile, level) - outfile.write('compounddef=model_.compounddefType(\n') - self.compounddef.exportLiteral(outfile, level, name_='compounddef') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('version'): - self.version = attrs.get('version').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compounddef': - obj_ = compounddefType.factory() - obj_.build(child_) - self.set_compounddef(obj_) -# end class DoxygenType - - -class compounddefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, 
derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): - self.kind = kind - self.prot = prot - self.id = id - self.compoundname = compoundname - self.title = title - if basecompoundref is None: - self.basecompoundref = [] - else: - self.basecompoundref = basecompoundref - if derivedcompoundref is None: - self.derivedcompoundref = [] - else: - self.derivedcompoundref = derivedcompoundref - if includes is None: - self.includes = [] - else: - self.includes = includes - if includedby is None: - self.includedby = [] - else: - self.includedby = includedby - self.incdepgraph = incdepgraph - self.invincdepgraph = invincdepgraph - if innerdir is None: - self.innerdir = [] - else: - self.innerdir = innerdir - if innerfile is None: - self.innerfile = [] - else: - self.innerfile = innerfile - if innerclass is None: - self.innerclass = [] - else: - self.innerclass = innerclass - if innernamespace is None: - self.innernamespace = [] - else: - self.innernamespace = innernamespace - if innerpage is None: - self.innerpage = [] - else: - self.innerpage = innerpage - if innergroup is None: - self.innergroup = [] - else: - self.innergroup = innergroup - self.templateparamlist = templateparamlist - if sectiondef is None: - self.sectiondef = [] - else: - self.sectiondef = sectiondef - self.briefdescription = briefdescription - self.detaileddescription = detaileddescription - self.inheritancegraph = inheritancegraph - self.collaborationgraph = collaborationgraph - self.programlisting = programlisting - self.location = location - self.listofallmembers = listofallmembers - def factory(*args_, **kwargs_): - if compounddefType.subclass: - return compounddefType.subclass(*args_, **kwargs_) - else: - return compounddefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_compoundname(self): return self.compoundname - def set_compoundname(self, compoundname): self.compoundname = compoundname - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_basecompoundref(self): return self.basecompoundref - def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref - def add_basecompoundref(self, value): self.basecompoundref.append(value) - def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value - def get_derivedcompoundref(self): return self.derivedcompoundref - def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref - def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) - def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value - def get_includes(self): return self.includes - def set_includes(self, includes): self.includes = includes - def add_includes(self, value): self.includes.append(value) - def insert_includes(self, index, value): self.includes[index] = value - def get_includedby(self): return self.includedby - def set_includedby(self, includedby): self.includedby = includedby - def add_includedby(self, value): self.includedby.append(value) - def insert_includedby(self, index, value): self.includedby[index] = value - def get_incdepgraph(self): return self.incdepgraph - def 
set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph - def get_invincdepgraph(self): return self.invincdepgraph - def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph - def get_innerdir(self): return self.innerdir - def set_innerdir(self, innerdir): self.innerdir = innerdir - def add_innerdir(self, value): self.innerdir.append(value) - def insert_innerdir(self, index, value): self.innerdir[index] = value - def get_innerfile(self): return self.innerfile - def set_innerfile(self, innerfile): self.innerfile = innerfile - def add_innerfile(self, value): self.innerfile.append(value) - def insert_innerfile(self, index, value): self.innerfile[index] = value - def get_innerclass(self): return self.innerclass - def set_innerclass(self, innerclass): self.innerclass = innerclass - def add_innerclass(self, value): self.innerclass.append(value) - def insert_innerclass(self, index, value): self.innerclass[index] = value - def get_innernamespace(self): return self.innernamespace - def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace - def add_innernamespace(self, value): self.innernamespace.append(value) - def insert_innernamespace(self, index, value): self.innernamespace[index] = value - def get_innerpage(self): return self.innerpage - def set_innerpage(self, innerpage): self.innerpage = innerpage - def add_innerpage(self, value): self.innerpage.append(value) - def insert_innerpage(self, index, value): self.innerpage[index] = value - def get_innergroup(self): return self.innergroup - def set_innergroup(self, innergroup): self.innergroup = innergroup - def add_innergroup(self, value): self.innergroup.append(value) - def insert_innergroup(self, index, value): self.innergroup[index] = value - def get_templateparamlist(self): return self.templateparamlist - def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist - def get_sectiondef(self): return self.sectiondef - def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef - def add_sectiondef(self, value): self.sectiondef.append(value) - def insert_sectiondef(self, index, value): self.sectiondef[index] = value - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription - def get_inheritancegraph(self): return self.inheritancegraph - def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph - def get_collaborationgraph(self): return self.collaborationgraph - def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph - def get_programlisting(self): return self.programlisting - def set_programlisting(self, programlisting): self.programlisting = programlisting - def get_location(self): return self.location - def set_location(self, location): self.location = location - def get_listofallmembers(self): return self.listofallmembers - def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', 
name_='compounddefType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='compounddefType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'): - if self.kind is not None: - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - if self.prot is not None: - outfile.write(' prot=%s' % (quote_attrib(self.prot), )) - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'): - if self.compoundname is not None: - showIndent(outfile, level) - outfile.write('<%scompoundname>%s\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) - if self.title is not None: - showIndent(outfile, level) - outfile.write('<%stitle>%s\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) - for basecompoundref_ in self.basecompoundref: - basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref') - for derivedcompoundref_ in self.derivedcompoundref: - derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref') - for includes_ in self.includes: - includes_.export(outfile, level, namespace_, name_='includes') - for includedby_ in self.includedby: - includedby_.export(outfile, level, namespace_, name_='includedby') - if self.incdepgraph: - self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph') - if self.invincdepgraph: - self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph') - for innerdir_ in self.innerdir: - innerdir_.export(outfile, level, namespace_, name_='innerdir') - for innerfile_ in self.innerfile: - innerfile_.export(outfile, level, namespace_, name_='innerfile') - for innerclass_ in self.innerclass: - innerclass_.export(outfile, level, namespace_, name_='innerclass') - for innernamespace_ in self.innernamespace: - innernamespace_.export(outfile, level, namespace_, name_='innernamespace') - for innerpage_ in self.innerpage: - innerpage_.export(outfile, level, namespace_, name_='innerpage') - for innergroup_ in self.innergroup: - innergroup_.export(outfile, level, namespace_, name_='innergroup') - if self.templateparamlist: - self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') - for sectiondef_ in self.sectiondef: - sectiondef_.export(outfile, level, namespace_, name_='sectiondef') - if self.briefdescription: - self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') - if self.detaileddescription: - self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') - if self.inheritancegraph: - self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph') - if self.collaborationgraph: - self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph') - if self.programlisting: - self.programlisting.export(outfile, level, namespace_, name_='programlisting') - if self.location: - self.location.export(outfile, level, namespace_, 
name_='location') - if self.listofallmembers: - self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers') - def hasContent_(self): - if ( - self.compoundname is not None or - self.title is not None or - self.basecompoundref is not None or - self.derivedcompoundref is not None or - self.includes is not None or - self.includedby is not None or - self.incdepgraph is not None or - self.invincdepgraph is not None or - self.innerdir is not None or - self.innerfile is not None or - self.innerclass is not None or - self.innernamespace is not None or - self.innerpage is not None or - self.innergroup is not None or - self.templateparamlist is not None or - self.sectiondef is not None or - self.briefdescription is not None or - self.detaileddescription is not None or - self.inheritancegraph is not None or - self.collaborationgraph is not None or - self.programlisting is not None or - self.location is not None or - self.listofallmembers is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='compounddefType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.kind is not None: - showIndent(outfile, level) - outfile.write('kind = "%s",\n' % (self.kind,)) - if self.prot is not None: - showIndent(outfile, level) - outfile.write('prot = "%s",\n' % (self.prot,)) - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding)) - if self.title: - showIndent(outfile, level) - outfile.write('title=model_.xsd_string(\n') - self.title.exportLiteral(outfile, level, name_='title') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('basecompoundref=[\n') - level += 1 - for basecompoundref in self.basecompoundref: - showIndent(outfile, level) - outfile.write('model_.basecompoundref(\n') - basecompoundref.exportLiteral(outfile, level, name_='basecompoundref') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('derivedcompoundref=[\n') - level += 1 - for derivedcompoundref in self.derivedcompoundref: - showIndent(outfile, level) - outfile.write('model_.derivedcompoundref(\n') - derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('includes=[\n') - level += 1 - for includes in self.includes: - showIndent(outfile, level) - outfile.write('model_.includes(\n') - includes.exportLiteral(outfile, level, name_='includes') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('includedby=[\n') - level += 1 - for includedby in self.includedby: - showIndent(outfile, level) - outfile.write('model_.includedby(\n') - includedby.exportLiteral(outfile, level, name_='includedby') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.incdepgraph: - showIndent(outfile, 
level) - outfile.write('incdepgraph=model_.graphType(\n') - self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph') - showIndent(outfile, level) - outfile.write('),\n') - if self.invincdepgraph: - showIndent(outfile, level) - outfile.write('invincdepgraph=model_.graphType(\n') - self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('innerdir=[\n') - level += 1 - for innerdir in self.innerdir: - showIndent(outfile, level) - outfile.write('model_.innerdir(\n') - innerdir.exportLiteral(outfile, level, name_='innerdir') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('innerfile=[\n') - level += 1 - for innerfile in self.innerfile: - showIndent(outfile, level) - outfile.write('model_.innerfile(\n') - innerfile.exportLiteral(outfile, level, name_='innerfile') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('innerclass=[\n') - level += 1 - for innerclass in self.innerclass: - showIndent(outfile, level) - outfile.write('model_.innerclass(\n') - innerclass.exportLiteral(outfile, level, name_='innerclass') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('innernamespace=[\n') - level += 1 - for innernamespace in self.innernamespace: - showIndent(outfile, level) - outfile.write('model_.innernamespace(\n') - innernamespace.exportLiteral(outfile, level, name_='innernamespace') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('innerpage=[\n') - level += 1 - for innerpage in self.innerpage: - showIndent(outfile, level) - outfile.write('model_.innerpage(\n') - innerpage.exportLiteral(outfile, level, name_='innerpage') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('innergroup=[\n') - level += 1 - for innergroup in self.innergroup: - showIndent(outfile, level) - outfile.write('model_.innergroup(\n') - innergroup.exportLiteral(outfile, level, name_='innergroup') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.templateparamlist: - showIndent(outfile, level) - outfile.write('templateparamlist=model_.templateparamlistType(\n') - self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('sectiondef=[\n') - level += 1 - for sectiondef in self.sectiondef: - showIndent(outfile, level) - outfile.write('model_.sectiondef(\n') - sectiondef.exportLiteral(outfile, level, name_='sectiondef') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.briefdescription: - showIndent(outfile, level) - outfile.write('briefdescription=model_.descriptionType(\n') - self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') - showIndent(outfile, level) - outfile.write('),\n') - if self.detaileddescription: - showIndent(outfile, level) - 
outfile.write('detaileddescription=model_.descriptionType(\n') - self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') - showIndent(outfile, level) - outfile.write('),\n') - if self.inheritancegraph: - showIndent(outfile, level) - outfile.write('inheritancegraph=model_.graphType(\n') - self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph') - showIndent(outfile, level) - outfile.write('),\n') - if self.collaborationgraph: - showIndent(outfile, level) - outfile.write('collaborationgraph=model_.graphType(\n') - self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph') - showIndent(outfile, level) - outfile.write('),\n') - if self.programlisting: - showIndent(outfile, level) - outfile.write('programlisting=model_.listingType(\n') - self.programlisting.exportLiteral(outfile, level, name_='programlisting') - showIndent(outfile, level) - outfile.write('),\n') - if self.location: - showIndent(outfile, level) - outfile.write('location=model_.locationType(\n') - self.location.exportLiteral(outfile, level, name_='location') - showIndent(outfile, level) - outfile.write('),\n') - if self.listofallmembers: - showIndent(outfile, level) - outfile.write('listofallmembers=model_.listofallmembersType(\n') - self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compoundname': - compoundname_ = '' - for text__content_ in child_.childNodes: - compoundname_ += text__content_.nodeValue - self.compoundname = compoundname_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - obj_ = docTitleType.factory() - obj_.build(child_) - self.set_title(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'basecompoundref': - obj_ = compoundRefType.factory() - obj_.build(child_) - self.basecompoundref.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'derivedcompoundref': - obj_ = compoundRefType.factory() - obj_.build(child_) - self.derivedcompoundref.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'includes': - obj_ = incType.factory() - obj_.build(child_) - self.includes.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'includedby': - obj_ = incType.factory() - obj_.build(child_) - self.includedby.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'incdepgraph': - obj_ = graphType.factory() - obj_.build(child_) - self.set_incdepgraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'invincdepgraph': - obj_ = graphType.factory() - obj_.build(child_) - self.set_invincdepgraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerdir': - obj_ = refType.factory() - obj_.build(child_) - self.innerdir.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerfile': - obj_ = refType.factory() - obj_.build(child_) - 
self.innerfile.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerclass': - obj_ = refType.factory() - obj_.build(child_) - self.innerclass.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innernamespace': - obj_ = refType.factory() - obj_.build(child_) - self.innernamespace.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerpage': - obj_ = refType.factory() - obj_.build(child_) - self.innerpage.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innergroup': - obj_ = refType.factory() - obj_.build(child_) - self.innergroup.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'templateparamlist': - obj_ = templateparamlistType.factory() - obj_.build(child_) - self.set_templateparamlist(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sectiondef': - obj_ = sectiondefType.factory() - obj_.build(child_) - self.sectiondef.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_briefdescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_detaileddescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'inheritancegraph': - obj_ = graphType.factory() - obj_.build(child_) - self.set_inheritancegraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'collaborationgraph': - obj_ = graphType.factory() - obj_.build(child_) - self.set_collaborationgraph(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'programlisting': - obj_ = listingType.factory() - obj_.build(child_) - self.set_programlisting(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'location': - obj_ = locationType.factory() - obj_.build(child_) - self.set_location(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'listofallmembers': - obj_ = listofallmembersType.factory() - obj_.build(child_) - self.set_listofallmembers(obj_) -# end class compounddefType - - -class listofallmembersType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, member=None): - if member is None: - self.member = [] - else: - self.member = member - def factory(*args_, **kwargs_): - if listofallmembersType.subclass: - return listofallmembersType.subclass(*args_, **kwargs_) - else: - return listofallmembersType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_member(self): return self.member - def set_member(self, member): self.member = member - def add_member(self, value): self.member.append(value) - def insert_member(self, index, value): self.member[index] = value - def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'): - pass - def exportChildren(self, outfile, level, namespace_='', 
name_='listofallmembersType'):
-        for member_ in self.member:
-            member_.export(outfile, level, namespace_, name_='member')
-    def hasContent_(self):
-        if (
-            self.member is not None
-            ):
-            return True
-        else:
-            return False
-    def exportLiteral(self, outfile, level, name_='listofallmembersType'):
-        level += 1
-        self.exportLiteralAttributes(outfile, level, name_)
-        if self.hasContent_():
-            self.exportLiteralChildren(outfile, level, name_)
-    def exportLiteralAttributes(self, outfile, level, name_):
-        pass
-    def exportLiteralChildren(self, outfile, level, name_):
-        showIndent(outfile, level)
-        outfile.write('member=[\n')
-        level += 1
-        for member in self.member:
-            showIndent(outfile, level)
-            outfile.write('model_.member(\n')
-            member.exportLiteral(outfile, level, name_='member')
-            showIndent(outfile, level)
-            outfile.write('),\n')
-        level -= 1
-        showIndent(outfile, level)
-        outfile.write('],\n')
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        pass
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'member':
-            obj_ = memberRefType.factory()
-            obj_.build(child_)
-            self.member.append(obj_)
-# end class listofallmembersType
-
-
-class memberRefType(GeneratedsSuper):
-    subclass = None
-    superclass = None
-    def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None):
-        self.virt = virt
-        self.prot = prot
-        self.refid = refid
-        self.ambiguityscope = ambiguityscope
-        self.scope = scope
-        self.name = name
-    def factory(*args_, **kwargs_):
-        if memberRefType.subclass:
-            return memberRefType.subclass(*args_, **kwargs_)
-        else:
-            return memberRefType(*args_, **kwargs_)
-    factory = staticmethod(factory)
-    def get_scope(self): return self.scope
-    def set_scope(self, scope): self.scope = scope
-    def get_name(self): return self.name
-    def set_name(self, name): self.name = name
-    def get_virt(self): return self.virt
-    def set_virt(self, virt): self.virt = virt
-    def get_prot(self): return self.prot
-    def set_prot(self, prot): self.prot = prot
-    def get_refid(self): return self.refid
-    def set_refid(self, refid): self.refid = refid
-    def get_ambiguityscope(self): return self.ambiguityscope
-    def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope
-    def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''):
-        showIndent(outfile, level)
-        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
-        self.exportAttributes(outfile, level, namespace_, name_='memberRefType')
-        if self.hasContent_():
-            outfile.write('>\n')
-            self.exportChildren(outfile, level + 1, namespace_, name_)
-            showIndent(outfile, level)
-            outfile.write('</%s%s>\n' % (namespace_, name_))
-        else:
-            outfile.write(' />\n')
-    def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'):
-        if self.virt is not None:
-            outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
-        if self.prot is not None:
-            outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
-        if self.refid is not None:
-            outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
-        if self.ambiguityscope is not None:
-            outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
-    def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'):
-        if self.scope is not None:
-            showIndent(outfile, level)
-            outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
-        if self.name is not None:
-            showIndent(outfile, level)
-            outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
-    def hasContent_(self):
-        if (
-            self.scope is not None or
-            self.name is not None
-            ):
-            return True
-        else:
-            return False
-    def exportLiteral(self, outfile, level, name_='memberRefType'):
-        level += 1
-        self.exportLiteralAttributes(outfile, level, name_)
-        if self.hasContent_():
-            self.exportLiteralChildren(outfile, level, name_)
-    def exportLiteralAttributes(self, outfile, level, name_):
-        if self.virt is not None:
-            showIndent(outfile, level)
-            outfile.write('virt = "%s",\n' % (self.virt,))
-        if self.prot is not None:
-            showIndent(outfile, level)
-            outfile.write('prot = "%s",\n' % (self.prot,))
-        if self.refid is not None:
-            showIndent(outfile, level)
-            outfile.write('refid = %s,\n' % (self.refid,))
-        if self.ambiguityscope is not None:
-            showIndent(outfile, level)
-            outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,))
-    def exportLiteralChildren(self, outfile, level, name_):
-        showIndent(outfile, level)
-        outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding))
-        showIndent(outfile, level)
-        outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
-    def build(self, node_):
-        attrs = node_.attributes
-        self.buildAttributes(attrs)
-        for child_ in node_.childNodes:
-            nodeName_ = child_.nodeName.split(':')[-1]
-            self.buildChildren(child_, nodeName_)
-    def buildAttributes(self, attrs):
-        if attrs.get('virt'):
-            self.virt = attrs.get('virt').value
-        if attrs.get('prot'):
-            self.prot = attrs.get('prot').value
-        if attrs.get('refid'):
-            self.refid = attrs.get('refid').value
-        if attrs.get('ambiguityscope'):
-            self.ambiguityscope = attrs.get('ambiguityscope').value
-    def buildChildren(self, child_, nodeName_):
-        if child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'scope':
-            scope_ = ''
-            for text__content_ in child_.childNodes:
-                scope_ += text__content_.nodeValue
-            self.scope = scope_
-        elif child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'name':
-            name_ = ''
-            for text__content_ in child_.childNodes:
-                name_ += text__content_.nodeValue
-            self.name = name_
-# end class memberRefType
-
-
-class scope(GeneratedsSuper):
-    subclass = None
-    superclass = None
-    def __init__(self, valueOf_=''):
-        self.valueOf_ = valueOf_
-    def factory(*args_, **kwargs_):
-        if scope.subclass:
-            return scope.subclass(*args_, **kwargs_)
-        else:
-            return scope(*args_, **kwargs_)
-    factory = staticmethod(factory)
-    def getValueOf_(self): return self.valueOf_
-    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
-    def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''):
-        showIndent(outfile, level)
-        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
-        self.exportAttributes(outfile, level, namespace_, name_='scope')
-        if self.hasContent_():
-            outfile.write('>\n')
-            self.exportChildren(outfile, level + 1, namespace_, name_)
-            showIndent(outfile, level)
-            outfile.write('</%s%s>\n' % (namespace_, name_))
-        else:
-            outfile.write(' />\n')
-    def exportAttributes(self, outfile, level, namespace_='', name_='scope'):
-        pass
-    def 
exportChildren(self, outfile, level, namespace_='', name_='scope'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='scope'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class scope - - -class name(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if name.subclass: - return name.subclass(*args_, **kwargs_) - else: - return name(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='name') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='name'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='name'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='name'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class name - - -class 
compoundRefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.virt = virt - self.prot = prot - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if compoundRefType.subclass: - return compoundRefType.subclass(*args_, **kwargs_) - else: - return compoundRefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_virt(self): return self.virt - def set_virt(self, virt): self.virt = virt - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='compoundRefType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'): - if self.virt is not None: - outfile.write(' virt=%s' % (quote_attrib(self.virt), )) - if self.prot is not None: - outfile.write(' prot=%s' % (quote_attrib(self.prot), )) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='compoundRefType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.virt is not None: - showIndent(outfile, level) - outfile.write('virt = "%s",\n' % (self.virt,)) - if self.prot is not None: - showIndent(outfile, level) - outfile.write('prot = "%s",\n' % (self.prot,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('virt'): - self.virt = attrs.get('virt').value - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - 
self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class compoundRefType - - -class reimplementType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if reimplementType.subclass: - return reimplementType.subclass(*args_, **kwargs_) - else: - return reimplementType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='reimplementType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'): - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='reimplementType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class reimplementType - - -class incType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.local = local - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = 
MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if incType.subclass: - return incType.subclass(*args_, **kwargs_) - else: - return incType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_local(self): return self.local - def set_local(self, local): self.local = local - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='incType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='incType'): - if self.local is not None: - outfile.write(' local=%s' % (quote_attrib(self.local), )) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='incType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='incType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.local is not None: - showIndent(outfile, level) - outfile.write('local = "%s",\n' % (self.local,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('local'): - self.local = attrs.get('local').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class incType - - -class refType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): - self.prot = prot - self.refid = refid - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if refType.subclass: - 
return refType.subclass(*args_, **kwargs_) - else: - return refType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='refType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='refType'): - if self.prot is not None: - outfile.write(' prot=%s' % (quote_attrib(self.prot), )) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='refType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='refType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.prot is not None: - showIndent(outfile, level) - outfile.write('prot = "%s",\n' % (self.prot,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class refType - - -class refTextType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): - self.refid = refid - self.kindref = kindref - self.external = external - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if refTextType.subclass: - return refTextType.subclass(*args_, **kwargs_) - else: - return refTextType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): 
return self.refid - def set_refid(self, refid): self.refid = refid - def get_kindref(self): return self.kindref - def set_kindref(self, kindref): self.kindref = kindref - def get_external(self): return self.external - def set_external(self, external): self.external = external - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='refTextType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'): - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - if self.kindref is not None: - outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) - if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) - def exportChildren(self, outfile, level, namespace_='', name_='refTextType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='refTextType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - if self.kindref is not None: - showIndent(outfile, level) - outfile.write('kindref = "%s",\n' % (self.kindref,)) - if self.external is not None: - showIndent(outfile, level) - outfile.write('external = %s,\n' % (self.external,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('kindref'): - self.kindref = attrs.get('kindref').value - if attrs.get('external'): - self.external = attrs.get('external').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class refTextType - - -class sectiondefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, header=None, description=None, memberdef=None): - self.kind = kind - self.header = header - self.description = description - if memberdef is None: - 
self.memberdef = [] - else: - self.memberdef = memberdef - def factory(*args_, **kwargs_): - if sectiondefType.subclass: - return sectiondefType.subclass(*args_, **kwargs_) - else: - return sectiondefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_header(self): return self.header - def set_header(self, header): self.header = header - def get_description(self): return self.description - def set_description(self, description): self.description = description - def get_memberdef(self): return self.memberdef - def set_memberdef(self, memberdef): self.memberdef = memberdef - def add_memberdef(self, value): self.memberdef.append(value) - def insert_memberdef(self, index, value): self.memberdef[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='sectiondefType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'): - if self.kind is not None: - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'): - if self.header is not None: - showIndent(outfile, level) - outfile.write('<%sheader>%s\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_)) - if self.description: - self.description.export(outfile, level, namespace_, name_='description') - for memberdef_ in self.memberdef: - memberdef_.export(outfile, level, namespace_, name_='memberdef') - def hasContent_(self): - if ( - self.header is not None or - self.description is not None or - self.memberdef is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='sectiondefType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.kind is not None: - showIndent(outfile, level) - outfile.write('kind = "%s",\n' % (self.kind,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding)) - if self.description: - showIndent(outfile, level) - outfile.write('description=model_.descriptionType(\n') - self.description.exportLiteral(outfile, level, name_='description') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('memberdef=[\n') - level += 1 - for memberdef in self.memberdef: - showIndent(outfile, level) - outfile.write('model_.memberdef(\n') - memberdef.exportLiteral(outfile, level, name_='memberdef') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = 
attrs.get('kind').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'header': - header_ = '' - for text__content_ in child_.childNodes: - header_ += text__content_.nodeValue - self.header = header_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'description': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_description(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'memberdef': - obj_ = memberdefType.factory() - obj_.build(child_) - self.memberdef.append(obj_) -# end class sectiondefType - - -class memberdefType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): - self.initonly = initonly - self.kind = kind - self.volatile = volatile - self.const = const - self.raisexx = raisexx - self.virt = virt - self.readable = readable - self.prot = prot - self.explicit = explicit - self.new = new - self.final = final - self.writable = writable - self.add = add - self.static = static - self.remove = remove - self.sealed = sealed - self.mutable = mutable - self.gettable = gettable - self.inline = inline - self.settable = settable - self.id = id - self.templateparamlist = templateparamlist - self.type_ = type_ - self.definition = definition - self.argsstring = argsstring - self.name = name - self.read = read - self.write = write - self.bitfield = bitfield - if reimplements is None: - self.reimplements = [] - else: - self.reimplements = reimplements - if reimplementedby is None: - self.reimplementedby = [] - else: - self.reimplementedby = reimplementedby - if param is None: - self.param = [] - else: - self.param = param - if enumvalue is None: - self.enumvalue = [] - else: - self.enumvalue = enumvalue - self.initializer = initializer - self.exceptions = exceptions - self.briefdescription = briefdescription - self.detaileddescription = detaileddescription - self.inbodydescription = inbodydescription - self.location = location - if references is None: - self.references = [] - else: - self.references = references - if referencedby is None: - self.referencedby = [] - else: - self.referencedby = referencedby - def factory(*args_, **kwargs_): - if memberdefType.subclass: - return memberdefType.subclass(*args_, **kwargs_) - else: - return memberdefType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_templateparamlist(self): return self.templateparamlist - def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - def get_definition(self): return self.definition - def set_definition(self, definition): self.definition = definition - def get_argsstring(self): return self.argsstring - def set_argsstring(self, argsstring): self.argsstring = argsstring - def get_name(self): return self.name - def set_name(self, name): 
self.name = name - def get_read(self): return self.read - def set_read(self, read): self.read = read - def get_write(self): return self.write - def set_write(self, write): self.write = write - def get_bitfield(self): return self.bitfield - def set_bitfield(self, bitfield): self.bitfield = bitfield - def get_reimplements(self): return self.reimplements - def set_reimplements(self, reimplements): self.reimplements = reimplements - def add_reimplements(self, value): self.reimplements.append(value) - def insert_reimplements(self, index, value): self.reimplements[index] = value - def get_reimplementedby(self): return self.reimplementedby - def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby - def add_reimplementedby(self, value): self.reimplementedby.append(value) - def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value - def get_param(self): return self.param - def set_param(self, param): self.param = param - def add_param(self, value): self.param.append(value) - def insert_param(self, index, value): self.param[index] = value - def get_enumvalue(self): return self.enumvalue - def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue - def add_enumvalue(self, value): self.enumvalue.append(value) - def insert_enumvalue(self, index, value): self.enumvalue[index] = value - def get_initializer(self): return self.initializer - def set_initializer(self, initializer): self.initializer = initializer - def get_exceptions(self): return self.exceptions - def set_exceptions(self, exceptions): self.exceptions = exceptions - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription - def get_inbodydescription(self): return self.inbodydescription - def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription - def get_location(self): return self.location - def set_location(self, location): self.location = location - def get_references(self): return self.references - def set_references(self, references): self.references = references - def add_references(self, value): self.references.append(value) - def insert_references(self, index, value): self.references[index] = value - def get_referencedby(self): return self.referencedby - def set_referencedby(self, referencedby): self.referencedby = referencedby - def add_referencedby(self, value): self.referencedby.append(value) - def insert_referencedby(self, index, value): self.referencedby[index] = value - def get_initonly(self): return self.initonly - def set_initonly(self, initonly): self.initonly = initonly - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def get_volatile(self): return self.volatile - def set_volatile(self, volatile): self.volatile = volatile - def get_const(self): return self.const - def set_const(self, const): self.const = const - def get_raise(self): return self.raisexx - def set_raise(self, raisexx): self.raisexx = raisexx - def get_virt(self): return self.virt - def set_virt(self, virt): self.virt = virt - def get_readable(self): return self.readable - def set_readable(self, readable): self.readable = readable - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_explicit(self): return self.explicit - 
def set_explicit(self, explicit): self.explicit = explicit - def get_new(self): return self.new - def set_new(self, new): self.new = new - def get_final(self): return self.final - def set_final(self, final): self.final = final - def get_writable(self): return self.writable - def set_writable(self, writable): self.writable = writable - def get_add(self): return self.add - def set_add(self, add): self.add = add - def get_static(self): return self.static - def set_static(self, static): self.static = static - def get_remove(self): return self.remove - def set_remove(self, remove): self.remove = remove - def get_sealed(self): return self.sealed - def set_sealed(self, sealed): self.sealed = sealed - def get_mutable(self): return self.mutable - def set_mutable(self, mutable): self.mutable = mutable - def get_gettable(self): return self.gettable - def set_gettable(self, gettable): self.gettable = gettable - def get_inline(self): return self.inline - def set_inline(self, inline): self.inline = inline - def get_settable(self): return self.settable - def set_settable(self, settable): self.settable = settable - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='memberdefType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'): - if self.initonly is not None: - outfile.write(' initonly=%s' % (quote_attrib(self.initonly), )) - if self.kind is not None: - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - if self.volatile is not None: - outfile.write(' volatile=%s' % (quote_attrib(self.volatile), )) - if self.const is not None: - outfile.write(' const=%s' % (quote_attrib(self.const), )) - if self.raisexx is not None: - outfile.write(' raise=%s' % (quote_attrib(self.raisexx), )) - if self.virt is not None: - outfile.write(' virt=%s' % (quote_attrib(self.virt), )) - if self.readable is not None: - outfile.write(' readable=%s' % (quote_attrib(self.readable), )) - if self.prot is not None: - outfile.write(' prot=%s' % (quote_attrib(self.prot), )) - if self.explicit is not None: - outfile.write(' explicit=%s' % (quote_attrib(self.explicit), )) - if self.new is not None: - outfile.write(' new=%s' % (quote_attrib(self.new), )) - if self.final is not None: - outfile.write(' final=%s' % (quote_attrib(self.final), )) - if self.writable is not None: - outfile.write(' writable=%s' % (quote_attrib(self.writable), )) - if self.add is not None: - outfile.write(' add=%s' % (quote_attrib(self.add), )) - if self.static is not None: - outfile.write(' static=%s' % (quote_attrib(self.static), )) - if self.remove is not None: - outfile.write(' remove=%s' % (quote_attrib(self.remove), )) - if self.sealed is not None: - outfile.write(' sealed=%s' % (quote_attrib(self.sealed), )) - if self.mutable is not None: - outfile.write(' mutable=%s' % (quote_attrib(self.mutable), )) - if self.gettable is not None: - outfile.write(' gettable=%s' % (quote_attrib(self.gettable), )) - if self.inline is not None: - outfile.write(' inline=%s' % (quote_attrib(self.inline), )) - if self.settable is not None: - outfile.write(' 
settable=%s' % (quote_attrib(self.settable), )) - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'): - if self.templateparamlist: - self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') - if self.type_: - self.type_.export(outfile, level, namespace_, name_='type') - if self.definition is not None: - showIndent(outfile, level) - outfile.write('<%sdefinition>%s\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) - if self.argsstring is not None: - showIndent(outfile, level) - outfile.write('<%sargsstring>%s\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) - if self.name is not None: - showIndent(outfile, level) - outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) - if self.read is not None: - showIndent(outfile, level) - outfile.write('<%sread>%s\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) - if self.write is not None: - showIndent(outfile, level) - outfile.write('<%swrite>%s\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) - if self.bitfield is not None: - showIndent(outfile, level) - outfile.write('<%sbitfield>%s\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) - for reimplements_ in self.reimplements: - reimplements_.export(outfile, level, namespace_, name_='reimplements') - for reimplementedby_ in self.reimplementedby: - reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby') - for param_ in self.param: - param_.export(outfile, level, namespace_, name_='param') - for enumvalue_ in self.enumvalue: - enumvalue_.export(outfile, level, namespace_, name_='enumvalue') - if self.initializer: - self.initializer.export(outfile, level, namespace_, name_='initializer') - if self.exceptions: - self.exceptions.export(outfile, level, namespace_, name_='exceptions') - if self.briefdescription: - self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') - if self.detaileddescription: - self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') - if self.inbodydescription: - self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription') - if self.location: - self.location.export(outfile, level, namespace_, name_='location', ) - for references_ in self.references: - references_.export(outfile, level, namespace_, name_='references') - for referencedby_ in self.referencedby: - referencedby_.export(outfile, level, namespace_, name_='referencedby') - def hasContent_(self): - if ( - self.templateparamlist is not None or - self.type_ is not None or - self.definition is not None or - self.argsstring is not None or - self.name is not None or - self.read is not None or - self.write is not None or - self.bitfield is not None or - self.reimplements is not None or - self.reimplementedby is not None or - self.param is not None or - self.enumvalue is not None or - self.initializer is not None or - self.exceptions is not None or - self.briefdescription is not None or 
- self.detaileddescription is not None or - self.inbodydescription is not None or - self.location is not None or - self.references is not None or - self.referencedby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='memberdefType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.initonly is not None: - showIndent(outfile, level) - outfile.write('initonly = "%s",\n' % (self.initonly,)) - if self.kind is not None: - showIndent(outfile, level) - outfile.write('kind = "%s",\n' % (self.kind,)) - if self.volatile is not None: - showIndent(outfile, level) - outfile.write('volatile = "%s",\n' % (self.volatile,)) - if self.const is not None: - showIndent(outfile, level) - outfile.write('const = "%s",\n' % (self.const,)) - if self.raisexx is not None: - showIndent(outfile, level) - outfile.write('raisexx = "%s",\n' % (self.raisexx,)) - if self.virt is not None: - showIndent(outfile, level) - outfile.write('virt = "%s",\n' % (self.virt,)) - if self.readable is not None: - showIndent(outfile, level) - outfile.write('readable = "%s",\n' % (self.readable,)) - if self.prot is not None: - showIndent(outfile, level) - outfile.write('prot = "%s",\n' % (self.prot,)) - if self.explicit is not None: - showIndent(outfile, level) - outfile.write('explicit = "%s",\n' % (self.explicit,)) - if self.new is not None: - showIndent(outfile, level) - outfile.write('new = "%s",\n' % (self.new,)) - if self.final is not None: - showIndent(outfile, level) - outfile.write('final = "%s",\n' % (self.final,)) - if self.writable is not None: - showIndent(outfile, level) - outfile.write('writable = "%s",\n' % (self.writable,)) - if self.add is not None: - showIndent(outfile, level) - outfile.write('add = "%s",\n' % (self.add,)) - if self.static is not None: - showIndent(outfile, level) - outfile.write('static = "%s",\n' % (self.static,)) - if self.remove is not None: - showIndent(outfile, level) - outfile.write('remove = "%s",\n' % (self.remove,)) - if self.sealed is not None: - showIndent(outfile, level) - outfile.write('sealed = "%s",\n' % (self.sealed,)) - if self.mutable is not None: - showIndent(outfile, level) - outfile.write('mutable = "%s",\n' % (self.mutable,)) - if self.gettable is not None: - showIndent(outfile, level) - outfile.write('gettable = "%s",\n' % (self.gettable,)) - if self.inline is not None: - showIndent(outfile, level) - outfile.write('inline = "%s",\n' % (self.inline,)) - if self.settable is not None: - showIndent(outfile, level) - outfile.write('settable = "%s",\n' % (self.settable,)) - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.templateparamlist: - showIndent(outfile, level) - outfile.write('templateparamlist=model_.templateparamlistType(\n') - self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') - showIndent(outfile, level) - outfile.write('),\n') - if self.type_: - showIndent(outfile, level) - outfile.write('type_=model_.linkedTextType(\n') - self.type_.exportLiteral(outfile, level, name_='type') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('argsstring=%s,\n' % 
quote_python(self.argsstring).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('reimplements=[\n') - level += 1 - for reimplements in self.reimplements: - showIndent(outfile, level) - outfile.write('model_.reimplements(\n') - reimplements.exportLiteral(outfile, level, name_='reimplements') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('reimplementedby=[\n') - level += 1 - for reimplementedby in self.reimplementedby: - showIndent(outfile, level) - outfile.write('model_.reimplementedby(\n') - reimplementedby.exportLiteral(outfile, level, name_='reimplementedby') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('param=[\n') - level += 1 - for param in self.param: - showIndent(outfile, level) - outfile.write('model_.param(\n') - param.exportLiteral(outfile, level, name_='param') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('enumvalue=[\n') - level += 1 - for enumvalue in self.enumvalue: - showIndent(outfile, level) - outfile.write('model_.enumvalue(\n') - enumvalue.exportLiteral(outfile, level, name_='enumvalue') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.initializer: - showIndent(outfile, level) - outfile.write('initializer=model_.linkedTextType(\n') - self.initializer.exportLiteral(outfile, level, name_='initializer') - showIndent(outfile, level) - outfile.write('),\n') - if self.exceptions: - showIndent(outfile, level) - outfile.write('exceptions=model_.linkedTextType(\n') - self.exceptions.exportLiteral(outfile, level, name_='exceptions') - showIndent(outfile, level) - outfile.write('),\n') - if self.briefdescription: - showIndent(outfile, level) - outfile.write('briefdescription=model_.descriptionType(\n') - self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') - showIndent(outfile, level) - outfile.write('),\n') - if self.detaileddescription: - showIndent(outfile, level) - outfile.write('detaileddescription=model_.descriptionType(\n') - self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') - showIndent(outfile, level) - outfile.write('),\n') - if self.inbodydescription: - showIndent(outfile, level) - outfile.write('inbodydescription=model_.descriptionType(\n') - self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription') - showIndent(outfile, level) - outfile.write('),\n') - if self.location: - showIndent(outfile, level) - outfile.write('location=model_.locationType(\n') - self.location.exportLiteral(outfile, level, name_='location') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('references=[\n') - level += 1 - for references in self.references: - showIndent(outfile, 
level) - outfile.write('model_.references(\n') - references.exportLiteral(outfile, level, name_='references') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('referencedby=[\n') - level += 1 - for referencedby in self.referencedby: - showIndent(outfile, level) - outfile.write('model_.referencedby(\n') - referencedby.exportLiteral(outfile, level, name_='referencedby') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('initonly'): - self.initonly = attrs.get('initonly').value - if attrs.get('kind'): - self.kind = attrs.get('kind').value - if attrs.get('volatile'): - self.volatile = attrs.get('volatile').value - if attrs.get('const'): - self.const = attrs.get('const').value - if attrs.get('raise'): - self.raisexx = attrs.get('raise').value - if attrs.get('virt'): - self.virt = attrs.get('virt').value - if attrs.get('readable'): - self.readable = attrs.get('readable').value - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('explicit'): - self.explicit = attrs.get('explicit').value - if attrs.get('new'): - self.new = attrs.get('new').value - if attrs.get('final'): - self.final = attrs.get('final').value - if attrs.get('writable'): - self.writable = attrs.get('writable').value - if attrs.get('add'): - self.add = attrs.get('add').value - if attrs.get('static'): - self.static = attrs.get('static').value - if attrs.get('remove'): - self.remove = attrs.get('remove').value - if attrs.get('sealed'): - self.sealed = attrs.get('sealed').value - if attrs.get('mutable'): - self.mutable = attrs.get('mutable').value - if attrs.get('gettable'): - self.gettable = attrs.get('gettable').value - if attrs.get('inline'): - self.inline = attrs.get('inline').value - if attrs.get('settable'): - self.settable = attrs.get('settable').value - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'templateparamlist': - obj_ = templateparamlistType.factory() - obj_.build(child_) - self.set_templateparamlist(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'type': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_type(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'definition': - definition_ = '' - for text__content_ in child_.childNodes: - definition_ += text__content_.nodeValue - self.definition = definition_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'argsstring': - argsstring_ = '' - for text__content_ in child_.childNodes: - argsstring_ += text__content_.nodeValue - self.argsstring = argsstring_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - name_ = '' - for text__content_ in child_.childNodes: - name_ += text__content_.nodeValue - self.name = name_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'read': - read_ = '' - for text__content_ in child_.childNodes: - read_ += text__content_.nodeValue - self.read = read_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'write': - write_ = '' - for text__content_ in 
child_.childNodes: - write_ += text__content_.nodeValue - self.write = write_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'bitfield': - bitfield_ = '' - for text__content_ in child_.childNodes: - bitfield_ += text__content_.nodeValue - self.bitfield = bitfield_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'reimplements': - obj_ = reimplementType.factory() - obj_.build(child_) - self.reimplements.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'reimplementedby': - obj_ = reimplementType.factory() - obj_.build(child_) - self.reimplementedby.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'param': - obj_ = paramType.factory() - obj_.build(child_) - self.param.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'enumvalue': - obj_ = enumvalueType.factory() - obj_.build(child_) - self.enumvalue.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'initializer': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_initializer(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'exceptions': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_exceptions(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_briefdescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_detaileddescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'inbodydescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_inbodydescription(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'location': - obj_ = locationType.factory() - obj_.build(child_) - self.set_location(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'references': - obj_ = referenceType.factory() - obj_.build(child_) - self.references.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'referencedby': - obj_ = referenceType.factory() - obj_.build(child_) - self.referencedby.append(obj_) -# end class memberdefType - - -class definition(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if definition.subclass: - return definition.subclass(*args_, **kwargs_) - else: - return definition(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='definition') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='definition'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='definition'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - 
outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='definition'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class definition - - -class argsstring(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if argsstring.subclass: - return argsstring.subclass(*args_, **kwargs_) - else: - return argsstring(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='argsstring') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='argsstring'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='argsstring'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class argsstring - - -class read(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): 
- if read.subclass: - return read.subclass(*args_, **kwargs_) - else: - return read(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='read') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='read'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='read'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='read'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class read - - -class write(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if write.subclass: - return write.subclass(*args_, **kwargs_) - else: - return write(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='write') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='write'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='write'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='write'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if 
self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class write - - -class bitfield(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if bitfield.subclass: - return bitfield.subclass(*args_, **kwargs_) - else: - return bitfield(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='bitfield') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='bitfield'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='bitfield'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class bitfield - - -class descriptionType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if descriptionType.subclass: - return descriptionType.subclass(*args_, 
**kwargs_) - else: - return descriptionType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect1(self): return self.sect1 - def set_sect1(self, sect1): self.sect1 = sect1 - def add_sect1(self, value): self.sect1.append(value) - def insert_sect1(self, index, value): self.sect1[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='descriptionType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.sect1 is not None or - self.internal is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='descriptionType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - childobj_ = docTitleType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': - 
childobj_ = docSect1Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect1', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class descriptionType - - -class enumvalueType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): - self.prot = prot - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if enumvalueType.subclass: - return enumvalueType.subclass(*args_, **kwargs_) - else: - return enumvalueType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_initializer(self): return self.initializer - def set_initializer(self, initializer): self.initializer = initializer - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription - def get_prot(self): return self.prot - def set_prot(self, prot): self.prot = prot - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='enumvalueType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'): - if self.prot is not None: - outfile.write(' prot=%s' % (quote_attrib(self.prot), )) - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.name is not None or - self.initializer is not None or - self.briefdescription is not None or - self.detaileddescription is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='enumvalueType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.prot is not None: - showIndent(outfile, level) - outfile.write('prot = "%s",\n' % (self.prot,)) - if self.id is not None: - showIndent(outfile, level) - outfile.write('id 
= %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('prot'): - self.prot = attrs.get('prot').value - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - value_ = [] - for text_ in child_.childNodes: - value_.append(text_.nodeValue) - valuestr_ = ''.join(value_) - obj_ = self.mixedclass_(MixedContainer.CategorySimple, - MixedContainer.TypeString, 'name', valuestr_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'initializer': - childobj_ = linkedTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'initializer', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - childobj_ = descriptionType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'briefdescription', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': - childobj_ = descriptionType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'detaileddescription', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class enumvalueType - - -class templateparamlistType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, param=None): - if param is None: - self.param = [] - else: - self.param = param - def factory(*args_, **kwargs_): - if templateparamlistType.subclass: - return templateparamlistType.subclass(*args_, **kwargs_) - else: - return templateparamlistType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_param(self): return self.param - def set_param(self, param): self.param = param - def add_param(self, value): self.param.append(value) - def insert_param(self, index, value): self.param[index] = value - def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType') - if 
self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'): - for param_ in self.param: - param_.export(outfile, level, namespace_, name_='param') - def hasContent_(self): - if ( - self.param is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='templateparamlistType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('param=[\n') - level += 1 - for param in self.param: - showIndent(outfile, level) - outfile.write('model_.param(\n') - param.exportLiteral(outfile, level, name_='param') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'param': - obj_ = paramType.factory() - obj_.build(child_) - self.param.append(obj_) -# end class templateparamlistType - - -class paramType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None): - self.type_ = type_ - self.declname = declname - self.defname = defname - self.array = array - self.defval = defval - self.briefdescription = briefdescription - def factory(*args_, **kwargs_): - if paramType.subclass: - return paramType.subclass(*args_, **kwargs_) - else: - return paramType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - def get_declname(self): return self.declname - def set_declname(self, declname): self.declname = declname - def get_defname(self): return self.defname - def set_defname(self, defname): self.defname = defname - def get_array(self): return self.array - def set_array(self, array): self.array = array - def get_defval(self): return self.defval - def set_defval(self, defval): self.defval = defval - def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription - def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='paramType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='paramType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='paramType'): - if 
self.type_: - self.type_.export(outfile, level, namespace_, name_='type') - if self.declname is not None: - showIndent(outfile, level) - outfile.write('<%sdeclname>%s\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) - if self.defname is not None: - showIndent(outfile, level) - outfile.write('<%sdefname>%s\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) - if self.array is not None: - showIndent(outfile, level) - outfile.write('<%sarray>%s\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) - if self.defval: - self.defval.export(outfile, level, namespace_, name_='defval') - if self.briefdescription: - self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') - def hasContent_(self): - if ( - self.type_ is not None or - self.declname is not None or - self.defname is not None or - self.array is not None or - self.defval is not None or - self.briefdescription is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='paramType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.type_: - showIndent(outfile, level) - outfile.write('type_=model_.linkedTextType(\n') - self.type_.exportLiteral(outfile, level, name_='type') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding)) - if self.defval: - showIndent(outfile, level) - outfile.write('defval=model_.linkedTextType(\n') - self.defval.exportLiteral(outfile, level, name_='defval') - showIndent(outfile, level) - outfile.write('),\n') - if self.briefdescription: - showIndent(outfile, level) - outfile.write('briefdescription=model_.descriptionType(\n') - self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'type': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_type(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'declname': - declname_ = '' - for text__content_ in child_.childNodes: - declname_ += text__content_.nodeValue - self.declname = declname_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'defname': - defname_ = '' - for text__content_ in child_.childNodes: - defname_ += text__content_.nodeValue - self.defname = defname_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'array': - array_ = '' - for text__content_ in child_.childNodes: - array_ += text__content_.nodeValue - self.array = array_ - elif 
child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'defval': - obj_ = linkedTextType.factory() - obj_.build(child_) - self.set_defval(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_briefdescription(obj_) -# end class paramType - - -class declname(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if declname.subclass: - return declname.subclass(*args_, **kwargs_) - else: - return declname(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='declname') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='declname'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='declname'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='declname'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class declname - - -class defname(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if defname.subclass: - return defname.subclass(*args_, **kwargs_) - else: - return defname(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='defname') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, 
outfile, level, namespace_='', name_='defname'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='defname'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='defname'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class defname - - -class array(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if array.subclass: - return array.subclass(*args_, **kwargs_) - else: - return array(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='array') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='array'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='array'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='array'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ 
+= '![CDATA['+child_.nodeValue+']]' -# end class array - - -class linkedTextType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, ref=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if linkedTextType.subclass: - return linkedTextType.subclass(*args_, **kwargs_) - else: - return linkedTextType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_ref(self): return self.ref - def set_ref(self, ref): self.ref = ref - def add_ref(self, value): self.ref.append(value) - def insert_ref(self, index, value): self.ref[index] = value - def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='linkedTextType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.ref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='linkedTextType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': - childobj_ = docRefTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class linkedTextType - - -class graphType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, node=None): - if node is None: - self.node = [] - else: - self.node = node - def factory(*args_, **kwargs_): - if graphType.subclass: - return graphType.subclass(*args_, **kwargs_) - else: - return graphType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_node(self): return self.node - def set_node(self, node): self.node = node - def add_node(self, value): self.node.append(value) - def insert_node(self, index, value): self.node[index] = value - def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, 
)) - self.exportAttributes(outfile, level, namespace_, name_='graphType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='graphType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='graphType'): - for node_ in self.node: - node_.export(outfile, level, namespace_, name_='node') - def hasContent_(self): - if ( - self.node is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='graphType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('node=[\n') - level += 1 - for node in self.node: - showIndent(outfile, level) - outfile.write('model_.node(\n') - node.exportLiteral(outfile, level, name_='node') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'node': - obj_ = nodeType.factory() - obj_.build(child_) - self.node.append(obj_) -# end class graphType - - -class nodeType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, label=None, link=None, childnode=None): - self.id = id - self.label = label - self.link = link - if childnode is None: - self.childnode = [] - else: - self.childnode = childnode - def factory(*args_, **kwargs_): - if nodeType.subclass: - return nodeType.subclass(*args_, **kwargs_) - else: - return nodeType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_label(self): return self.label - def set_label(self, label): self.label = label - def get_link(self): return self.link - def set_link(self, link): self.link = link - def get_childnode(self): return self.childnode - def set_childnode(self, childnode): self.childnode = childnode - def add_childnode(self, value): self.childnode.append(value) - def insert_childnode(self, index, value): self.childnode[index] = value - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='nodeType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='nodeType'): - if self.label is not None: - showIndent(outfile, level) - 
outfile.write('<%slabel>%s\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_)) - if self.link: - self.link.export(outfile, level, namespace_, name_='link') - for childnode_ in self.childnode: - childnode_.export(outfile, level, namespace_, name_='childnode') - def hasContent_(self): - if ( - self.label is not None or - self.link is not None or - self.childnode is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='nodeType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding)) - if self.link: - showIndent(outfile, level) - outfile.write('link=model_.linkType(\n') - self.link.exportLiteral(outfile, level, name_='link') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('childnode=[\n') - level += 1 - for childnode in self.childnode: - showIndent(outfile, level) - outfile.write('model_.childnode(\n') - childnode.exportLiteral(outfile, level, name_='childnode') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'label': - label_ = '' - for text__content_ in child_.childNodes: - label_ += text__content_.nodeValue - self.label = label_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'link': - obj_ = linkType.factory() - obj_.build(child_) - self.set_link(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'childnode': - obj_ = childnodeType.factory() - obj_.build(child_) - self.childnode.append(obj_) -# end class nodeType - - -class label(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if label.subclass: - return label.subclass(*args_, **kwargs_) - else: - return label(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='label') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='label'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='label'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - 
value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='label'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class label - - -class childnodeType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, relation=None, refid=None, edgelabel=None): - self.relation = relation - self.refid = refid - if edgelabel is None: - self.edgelabel = [] - else: - self.edgelabel = edgelabel - def factory(*args_, **kwargs_): - if childnodeType.subclass: - return childnodeType.subclass(*args_, **kwargs_) - else: - return childnodeType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_edgelabel(self): return self.edgelabel - def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel - def add_edgelabel(self, value): self.edgelabel.append(value) - def insert_edgelabel(self, index, value): self.edgelabel[index] = value - def get_relation(self): return self.relation - def set_relation(self, relation): self.relation = relation - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='childnodeType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'): - if self.relation is not None: - outfile.write(' relation=%s' % (quote_attrib(self.relation), )) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'): - for edgelabel_ in self.edgelabel: - showIndent(outfile, level) - outfile.write('<%sedgelabel>%s\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) - def hasContent_(self): - if ( - self.edgelabel is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='childnodeType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - 
if self.relation is not None: - showIndent(outfile, level) - outfile.write('relation = "%s",\n' % (self.relation,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('edgelabel=[\n') - level += 1 - for edgelabel in self.edgelabel: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('relation'): - self.relation = attrs.get('relation').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'edgelabel': - edgelabel_ = '' - for text__content_ in child_.childNodes: - edgelabel_ += text__content_.nodeValue - self.edgelabel.append(edgelabel_) -# end class childnodeType - - -class edgelabel(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if edgelabel.subclass: - return edgelabel.subclass(*args_, **kwargs_) - else: - return edgelabel(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='edgelabel') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='edgelabel'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class edgelabel - - -class 
linkType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, external=None, valueOf_=''): - self.refid = refid - self.external = external - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if linkType.subclass: - return linkType.subclass(*args_, **kwargs_) - else: - return linkType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_external(self): return self.external - def set_external(self, external): self.external = external - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='linkType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='linkType'): - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) - def exportChildren(self, outfile, level, namespace_='', name_='linkType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='linkType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - if self.external is not None: - showIndent(outfile, level) - outfile.write('external = %s,\n' % (self.external,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('external'): - self.external = attrs.get('external').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class linkType - - -class listingType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, codeline=None): - if codeline is None: - self.codeline = [] - else: - self.codeline = codeline - def factory(*args_, **kwargs_): - if listingType.subclass: - return listingType.subclass(*args_, **kwargs_) - else: - return listingType(*args_, 
**kwargs_) - factory = staticmethod(factory) - def get_codeline(self): return self.codeline - def set_codeline(self, codeline): self.codeline = codeline - def add_codeline(self, value): self.codeline.append(value) - def insert_codeline(self, index, value): self.codeline[index] = value - def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='listingType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='listingType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='listingType'): - for codeline_ in self.codeline: - codeline_.export(outfile, level, namespace_, name_='codeline') - def hasContent_(self): - if ( - self.codeline is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='listingType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('codeline=[\n') - level += 1 - for codeline in self.codeline: - showIndent(outfile, level) - outfile.write('model_.codeline(\n') - codeline.exportLiteral(outfile, level, name_='codeline') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'codeline': - obj_ = codelineType.factory() - obj_.build(child_) - self.codeline.append(obj_) -# end class listingType - - -class codelineType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): - self.external = external - self.lineno = lineno - self.refkind = refkind - self.refid = refid - if highlight is None: - self.highlight = [] - else: - self.highlight = highlight - def factory(*args_, **kwargs_): - if codelineType.subclass: - return codelineType.subclass(*args_, **kwargs_) - else: - return codelineType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_highlight(self): return self.highlight - def set_highlight(self, highlight): self.highlight = highlight - def add_highlight(self, value): self.highlight.append(value) - def insert_highlight(self, index, value): self.highlight[index] = value - def get_external(self): return self.external - def set_external(self, external): self.external = external - def get_lineno(self): return self.lineno - def set_lineno(self, lineno): self.lineno = lineno - def get_refkind(self): return self.refkind - def set_refkind(self, refkind): self.refkind = refkind - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def export(self, outfile, level, namespace_='', name_='codelineType', 
namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='codelineType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'): - if self.external is not None: - outfile.write(' external=%s' % (quote_attrib(self.external), )) - if self.lineno is not None: - outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')) - if self.refkind is not None: - outfile.write(' refkind=%s' % (quote_attrib(self.refkind), )) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='codelineType'): - for highlight_ in self.highlight: - highlight_.export(outfile, level, namespace_, name_='highlight') - def hasContent_(self): - if ( - self.highlight is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='codelineType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.external is not None: - showIndent(outfile, level) - outfile.write('external = "%s",\n' % (self.external,)) - if self.lineno is not None: - showIndent(outfile, level) - outfile.write('lineno = %s,\n' % (self.lineno,)) - if self.refkind is not None: - showIndent(outfile, level) - outfile.write('refkind = "%s",\n' % (self.refkind,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('highlight=[\n') - level += 1 - for highlight in self.highlight: - showIndent(outfile, level) - outfile.write('model_.highlight(\n') - highlight.exportLiteral(outfile, level, name_='highlight') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('external'): - self.external = attrs.get('external').value - if attrs.get('lineno'): - try: - self.lineno = int(attrs.get('lineno').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (lineno): %s' % exp) - if attrs.get('refkind'): - self.refkind = attrs.get('refkind').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'highlight': - obj_ = highlightType.factory() - obj_.build(child_) - self.highlight.append(obj_) -# end class codelineType - - -class highlightType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None): - self.classxx = classxx - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - 
self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if highlightType.subclass: - return highlightType.subclass(*args_, **kwargs_) - else: - return highlightType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_sp(self): return self.sp - def set_sp(self, sp): self.sp = sp - def add_sp(self, value): self.sp.append(value) - def insert_sp(self, index, value): self.sp[index] = value - def get_ref(self): return self.ref - def set_ref(self, ref): self.ref = ref - def add_ref(self, value): self.ref.append(value) - def insert_ref(self, index, value): self.ref[index] = value - def get_class(self): return self.classxx - def set_class(self, classxx): self.classxx = classxx - def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='highlightType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'): - if self.classxx is not None: - outfile.write(' class=%s' % (quote_attrib(self.classxx), )) - def exportChildren(self, outfile, level, namespace_='', name_='highlightType'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.sp is not None or - self.ref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='highlightType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.classxx is not None: - showIndent(outfile, level) - outfile.write('classxx = "%s",\n' % (self.classxx,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('class'): - self.classxx = attrs.get('class').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sp': - value_ = [] - for text_ in child_.childNodes: - value_.append(text_.nodeValue) - valuestr_ = ''.join(value_) - obj_ = self.mixedclass_(MixedContainer.CategorySimple, - MixedContainer.TypeString, 'sp', valuestr_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': - childobj_ = docRefTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class highlightType - - -class 
sp(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if sp.subclass: - return sp.subclass(*args_, **kwargs_) - else: - return sp(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='sp') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='sp'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='sp'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='sp'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class sp - - -class referenceType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): - self.endline = endline - self.startline = startline - self.refid = refid - self.compoundref = compoundref - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if referenceType.subclass: - return referenceType.subclass(*args_, **kwargs_) - else: - return referenceType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_endline(self): return self.endline - def set_endline(self, endline): self.endline = endline - def get_startline(self): return self.startline - def set_startline(self, startline): self.startline = startline - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_compoundref(self): return self.compoundref - def set_compoundref(self, compoundref): self.compoundref = compoundref - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''): - 
showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='referenceType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'): - if self.endline is not None: - outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline')) - if self.startline is not None: - outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline')) - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - if self.compoundref is not None: - outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) - def exportChildren(self, outfile, level, namespace_='', name_='referenceType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='referenceType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.endline is not None: - showIndent(outfile, level) - outfile.write('endline = %s,\n' % (self.endline,)) - if self.startline is not None: - showIndent(outfile, level) - outfile.write('startline = %s,\n' % (self.startline,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - if self.compoundref is not None: - showIndent(outfile, level) - outfile.write('compoundref = %s,\n' % (self.compoundref,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('endline'): - try: - self.endline = int(attrs.get('endline').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (endline): %s' % exp) - if attrs.get('startline'): - try: - self.startline = int(attrs.get('startline').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (startline): %s' % exp) - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('compoundref'): - self.compoundref = attrs.get('compoundref').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class referenceType - - -class locationType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, bodystart=None, line=None, bodyend=None, 
bodyfile=None, file=None, valueOf_=''): - self.bodystart = bodystart - self.line = line - self.bodyend = bodyend - self.bodyfile = bodyfile - self.file = file - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if locationType.subclass: - return locationType.subclass(*args_, **kwargs_) - else: - return locationType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_bodystart(self): return self.bodystart - def set_bodystart(self, bodystart): self.bodystart = bodystart - def get_line(self): return self.line - def set_line(self, line): self.line = line - def get_bodyend(self): return self.bodyend - def set_bodyend(self, bodyend): self.bodyend = bodyend - def get_bodyfile(self): return self.bodyfile - def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile - def get_file(self): return self.file - def set_file(self, file): self.file = file - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='locationType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='locationType'): - if self.bodystart is not None: - outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart')) - if self.line is not None: - outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line')) - if self.bodyend is not None: - outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend')) - if self.bodyfile is not None: - outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) - if self.file is not None: - outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), )) - def exportChildren(self, outfile, level, namespace_='', name_='locationType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='locationType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.bodystart is not None: - showIndent(outfile, level) - outfile.write('bodystart = %s,\n' % (self.bodystart,)) - if self.line is not None: - showIndent(outfile, level) - outfile.write('line = %s,\n' % (self.line,)) - if self.bodyend is not None: - showIndent(outfile, level) - outfile.write('bodyend = %s,\n' % (self.bodyend,)) - if self.bodyfile is not None: - showIndent(outfile, level) - outfile.write('bodyfile = %s,\n' % (self.bodyfile,)) - if self.file is not None: - showIndent(outfile, level) - outfile.write('file = %s,\n' % (self.file,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % 
(self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('bodystart'): - try: - self.bodystart = int(attrs.get('bodystart').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (bodystart): %s' % exp) - if attrs.get('line'): - try: - self.line = int(attrs.get('line').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (line): %s' % exp) - if attrs.get('bodyend'): - try: - self.bodyend = int(attrs.get('bodyend').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (bodyend): %s' % exp) - if attrs.get('bodyfile'): - self.bodyfile = attrs.get('bodyfile').value - if attrs.get('file'): - self.file = attrs.get('file').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class locationType - - -class docSect1Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docSect1Type.subclass: - return docSect1Type.subclass(*args_, **kwargs_) - else: - return docSect1Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect2(self): return self.sect2 - def set_sect2(self, sect2): self.sect2 = sect2 - def add_sect2(self, value): self.sect2.append(value) - def insert_sect2(self, index, value): self.sect2[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect1Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.sect2 is not None or - self.internal is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docSect1Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if 
self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - childobj_ = docTitleType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect2': - childobj_ = docSect2Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect2', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS1Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect1Type - - -class docSect2Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docSect2Type.subclass: - return docSect2Type.subclass(*args_, **kwargs_) - else: - return docSect2Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = 
value - def get_sect3(self): return self.sect3 - def set_sect3(self, sect3): self.sect3 = sect3 - def add_sect3(self, value): self.sect3.append(value) - def insert_sect3(self, index, value): self.sect3[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect2Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.sect3 is not None or - self.internal is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docSect2Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - childobj_ = docTitleType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': - childobj_ = docSect3Type.factory() - 
childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS2Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect2Type - - -class docSect3Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docSect3Type.subclass: - return docSect3Type.subclass(*args_, **kwargs_) - else: - return docSect3Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect4(self): return self.sect4 - def set_sect4(self, sect4): self.sect4 = sect4 - def add_sect4(self, value): self.sect4.append(value) - def insert_sect4(self, index, value): self.sect4[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect3Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.sect4 is not None or - self.internal is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docSect3Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - 
for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - childobj_ = docTitleType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect4': - childobj_ = docSect4Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect4', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS3Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect3Type - - -class docSect4Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docSect4Type.subclass: - return docSect4Type.subclass(*args_, **kwargs_) - else: - return docSect4Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSect4Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % 
(namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.title is not None or - self.para is not None or - self.internal is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docSect4Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - childobj_ = docTitleType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - childobj_ = docInternalS4Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docSect4Type - - -class docInternalType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalType.subclass: - return docInternalType.subclass(*args_, **kwargs_) - else: - return docInternalType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = 
para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect1(self): return self.sect1 - def set_sect1(self, sect1): self.sect1 = sect1 - def add_sect1(self, value): self.sect1.append(value) - def insert_sect1(self, index, value): self.sect1[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect1 is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docInternalType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': - childobj_ = docSect1Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect1', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalType - - -class docInternalS1Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS1Type.subclass: - return docInternalS1Type.subclass(*args_, **kwargs_) - else: - return docInternalS1Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): 
self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect2(self): return self.sect2 - def set_sect2(self, sect2): self.sect2 = sect2 - def add_sect2(self, value): self.sect2.append(value) - def insert_sect2(self, index, value): self.sect2[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect2 is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docInternalS1Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect2': - childobj_ = docSect2Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect2', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS1Type - - -class docInternalS2Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS2Type.subclass: - return docInternalS2Type.subclass(*args_, **kwargs_) - else: - return docInternalS2Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def 
insert_para(self, index, value): self.para[index] = value - def get_sect3(self): return self.sect3 - def set_sect3(self, sect3): self.sect3 = sect3 - def add_sect3(self, value): self.sect3.append(value) - def insert_sect3(self, index, value): self.sect3[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect3 is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docInternalS2Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': - childobj_ = docSect3Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS2Type - - -class docInternalS3Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS3Type.subclass: - return docInternalS3Type.subclass(*args_, **kwargs_) - else: - return docInternalS3Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): 
self.para[index] = value - def get_sect3(self): return self.sect3 - def set_sect3(self, sect3): self.sect3 = sect3 - def add_sect3(self, value): self.sect3.append(value) - def insert_sect3(self, index, value): self.sect3[index] = value - def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None or - self.sect3 is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docInternalS3Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': - childobj_ = docSect4Type.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS3Type - - -class docInternalS4Type(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None, mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docInternalS4Type.subclass: - return docInternalS4Type.subclass(*args_, **kwargs_) - else: - return docInternalS4Type(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def export(self, outfile, 
level, namespace_='', name_='docInternalS4Type', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docInternalS4Type'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - childobj_ = docParaType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docInternalS4Type - - -class docTitleType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docTitleType.subclass: - return docTitleType.subclass(*args_, **kwargs_) - else: - return docTitleType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTitleType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, 
name_='docTitleType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docTitleType - - -class docParaType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docParaType.subclass: - return docParaType.subclass(*args_, **kwargs_) - else: - return docParaType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParaType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docParaType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docParaType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == 
Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docParaType - - -class docMarkupType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docMarkupType.subclass: - return docMarkupType.subclass(*args_, **kwargs_) - else: - return docMarkupType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docMarkupType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docMarkupType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docMarkupType - - -class docURLLink(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): - self.url = url - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docURLLink.subclass: - return docURLLink.subclass(*args_, **kwargs_) - else: - return docURLLink(*args_, **kwargs_) - factory = staticmethod(factory) - def get_url(self): return self.url - def set_url(self, url): self.url = url - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''): - showIndent(outfile, level) 
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docURLLink') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'): - if self.url is not None: - outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docURLLink'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.url is not None: - showIndent(outfile, level) - outfile.write('url = %s,\n' % (self.url,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('url'): - self.url = attrs.get('url').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docURLLink - - -class docAnchorType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docAnchorType.subclass: - return docAnchorType.subclass(*args_, **kwargs_) - else: - return docAnchorType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_id(self): return self.id - def set_id(self, id): self.id = id - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): - if 
self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docAnchorType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docAnchorType - - -class docFormulaType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docFormulaType.subclass: - return docFormulaType.subclass(*args_, **kwargs_) - else: - return docFormulaType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_id(self): return self.id - def set_id(self, id): self.id = id - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docFormulaType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if 
self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docFormulaType - - -class docIndexEntryType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, primaryie=None, secondaryie=None): - self.primaryie = primaryie - self.secondaryie = secondaryie - def factory(*args_, **kwargs_): - if docIndexEntryType.subclass: - return docIndexEntryType.subclass(*args_, **kwargs_) - else: - return docIndexEntryType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_primaryie(self): return self.primaryie - def set_primaryie(self, primaryie): self.primaryie = primaryie - def get_secondaryie(self): return self.secondaryie - def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie - def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): - if self.primaryie is not None: - showIndent(outfile, level) - outfile.write('<%sprimaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) - if self.secondaryie is not None: - showIndent(outfile, level) - outfile.write('<%ssecondaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) - def hasContent_(self): - if ( - self.primaryie is not None or - self.secondaryie is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docIndexEntryType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - 
for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'primaryie': - primaryie_ = '' - for text__content_ in child_.childNodes: - primaryie_ += text__content_.nodeValue - self.primaryie = primaryie_ - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'secondaryie': - secondaryie_ = '' - for text__content_ in child_.childNodes: - secondaryie_ += text__content_.nodeValue - self.secondaryie = secondaryie_ -# end class docIndexEntryType - - -class docListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, listitem=None): - if listitem is None: - self.listitem = [] - else: - self.listitem = listitem - def factory(*args_, **kwargs_): - if docListType.subclass: - return docListType.subclass(*args_, **kwargs_) - else: - return docListType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_listitem(self): return self.listitem - def set_listitem(self, listitem): self.listitem = listitem - def add_listitem(self, value): self.listitem.append(value) - def insert_listitem(self, index, value): self.listitem[index] = value - def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docListType'): - for listitem_ in self.listitem: - listitem_.export(outfile, level, namespace_, name_='listitem') - def hasContent_(self): - if ( - self.listitem is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docListType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('listitem=[\n') - level += 1 - for listitem in self.listitem: - showIndent(outfile, level) - outfile.write('model_.listitem(\n') - listitem.exportLiteral(outfile, level, name_='listitem') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'listitem': - obj_ = docListItemType.factory() - obj_.build(child_) - self.listitem.append(obj_) -# end class docListType - - -class docListItemType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, para=None): - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docListItemType.subclass: - return 
docListItemType.subclass(*args_, **kwargs_) - else: - return docListItemType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docListItemType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docListItemType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('para=[\n') - level += 1 - for para in self.para: - showIndent(outfile, level) - outfile.write('model_.para(\n') - para.exportLiteral(outfile, level, name_='para') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docListItemType - - -class docSimpleSectType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, title=None, para=None): - self.kind = kind - self.title = title - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docSimpleSectType.subclass: - return docSimpleSectType.subclass(*args_, **kwargs_) - else: - return docSimpleSectType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_title(self): return self.title - def set_title(self, title): self.title = title - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - 
outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): - if self.kind is not None: - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): - if self.title: - self.title.export(outfile, level, namespace_, name_='title') - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.title is not None or - self.para is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docSimpleSectType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.kind is not None: - showIndent(outfile, level) - outfile.write('kind = "%s",\n' % (self.kind,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.title: - showIndent(outfile, level) - outfile.write('title=model_.docTitleType(\n') - self.title.exportLiteral(outfile, level, name_='title') - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('para=[\n') - level += 1 - for para in self.para: - showIndent(outfile, level) - outfile.write('model_.para(\n') - para.exportLiteral(outfile, level, name_='para') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': - obj_ = docTitleType.factory() - obj_.build(child_) - self.set_title(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docSimpleSectType - - -class docVarListEntryType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, term=None): - self.term = term - def factory(*args_, **kwargs_): - if docVarListEntryType.subclass: - return docVarListEntryType.subclass(*args_, **kwargs_) - else: - return docVarListEntryType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_term(self): return self.term - def set_term(self, term): self.term = term - def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): - if self.term: - self.term.export(outfile, level, namespace_, name_='term', ) - def hasContent_(self): - if ( - self.term is not None - ): - return True - else: - return 
False - def exportLiteral(self, outfile, level, name_='docVarListEntryType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.term: - showIndent(outfile, level) - outfile.write('term=model_.docTitleType(\n') - self.term.exportLiteral(outfile, level, name_='term') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'term': - obj_ = docTitleType.factory() - obj_.build(child_) - self.set_term(obj_) -# end class docVarListEntryType - - -class docVariableListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if docVariableListType.subclass: - return docVariableListType.subclass(*args_, **kwargs_) - else: - return docVariableListType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docVariableListType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docVariableListType - - -class docRefTextType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, refid=None, kindref=None, 
external=None, valueOf_='', mixedclass_=None, content_=None): - self.refid = refid - self.kindref = kindref - self.external = external - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docRefTextType.subclass: - return docRefTextType.subclass(*args_, **kwargs_) - else: - return docRefTextType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def get_kindref(self): return self.kindref - def set_kindref(self, kindref): self.kindref = kindref - def get_external(self): return self.external - def set_external(self, external): self.external = external - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): - if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - if self.kindref is not None: - outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) - if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docRefTextType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - if self.kindref is not None: - showIndent(outfile, level) - outfile.write('kindref = "%s",\n' % (self.kindref,)) - if self.external is not None: - showIndent(outfile, level) - outfile.write('external = %s,\n' % (self.external,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('refid'): - self.refid = attrs.get('refid').value - if attrs.get('kindref'): - self.kindref = attrs.get('kindref').value - if attrs.get('external'): - self.external = attrs.get('external').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = 
self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docRefTextType - - -class docTableType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, rows=None, cols=None, row=None, caption=None): - self.rows = rows - self.cols = cols - if row is None: - self.row = [] - else: - self.row = row - self.caption = caption - def factory(*args_, **kwargs_): - if docTableType.subclass: - return docTableType.subclass(*args_, **kwargs_) - else: - return docTableType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_row(self): return self.row - def set_row(self, row): self.row = row - def add_row(self, value): self.row.append(value) - def insert_row(self, index, value): self.row[index] = value - def get_caption(self): return self.caption - def set_caption(self, caption): self.caption = caption - def get_rows(self): return self.rows - def set_rows(self, rows): self.rows = rows - def get_cols(self): return self.cols - def set_cols(self, cols): self.cols = cols - def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTableType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): - if self.rows is not None: - outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) - if self.cols is not None: - outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) - def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): - for row_ in self.row: - row_.export(outfile, level, namespace_, name_='row') - if self.caption: - self.caption.export(outfile, level, namespace_, name_='caption') - def hasContent_(self): - if ( - self.row is not None or - self.caption is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docTableType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.rows is not None: - showIndent(outfile, level) - outfile.write('rows = %s,\n' % (self.rows,)) - if self.cols is not None: - showIndent(outfile, level) - outfile.write('cols = %s,\n' % (self.cols,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('row=[\n') - level += 1 - for row in self.row: - showIndent(outfile, level) - outfile.write('model_.row(\n') - row.exportLiteral(outfile, level, name_='row') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.caption: - showIndent(outfile, level) - outfile.write('caption=model_.docCaptionType(\n') - self.caption.exportLiteral(outfile, level, name_='caption') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - 
self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('rows'): - try: - self.rows = int(attrs.get('rows').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (rows): %s' % exp) - if attrs.get('cols'): - try: - self.cols = int(attrs.get('cols').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (cols): %s' % exp) - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'row': - obj_ = docRowType.factory() - obj_.build(child_) - self.row.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'caption': - obj_ = docCaptionType.factory() - obj_.build(child_) - self.set_caption(obj_) -# end class docTableType - - -class docRowType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, entry=None): - if entry is None: - self.entry = [] - else: - self.entry = entry - def factory(*args_, **kwargs_): - if docRowType.subclass: - return docRowType.subclass(*args_, **kwargs_) - else: - return docRowType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_entry(self): return self.entry - def set_entry(self, entry): self.entry = entry - def add_entry(self, value): self.entry.append(value) - def insert_entry(self, index, value): self.entry[index] = value - def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docRowType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): - for entry_ in self.entry: - entry_.export(outfile, level, namespace_, name_='entry') - def hasContent_(self): - if ( - self.entry is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docRowType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('entry=[\n') - level += 1 - for entry in self.entry: - showIndent(outfile, level) - outfile.write('model_.entry(\n') - entry.exportLiteral(outfile, level, name_='entry') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'entry': - obj_ = docEntryType.factory() - obj_.build(child_) - self.entry.append(obj_) -# end class docRowType - - -class docEntryType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, thead=None, para=None): - self.thead = 
thead - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docEntryType.subclass: - return docEntryType.subclass(*args_, **kwargs_) - else: - return docEntryType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_thead(self): return self.thead - def set_thead(self, thead): self.thead = thead - def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docEntryType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): - if self.thead is not None: - outfile.write(' thead=%s' % (quote_attrib(self.thead), )) - def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docEntryType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.thead is not None: - showIndent(outfile, level) - outfile.write('thead = "%s",\n' % (self.thead,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('para=[\n') - level += 1 - for para in self.para: - showIndent(outfile, level) - outfile.write('model_.para(\n') - para.exportLiteral(outfile, level, name_='para') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('thead'): - self.thead = attrs.get('thead').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docEntryType - - -class docCaptionType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_='', mixedclass_=None, content_=None): - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docCaptionType.subclass: - return docCaptionType.subclass(*args_, **kwargs_) - else: - return docCaptionType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, 
name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docCaptionType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docCaptionType - - -class docHeadingType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): - self.level = level - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docHeadingType.subclass: - return docHeadingType.subclass(*args_, **kwargs_) - else: - return docHeadingType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_level(self): return self.level - def set_level(self, level): self.level = level - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): - if self.level is not None: - outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) - def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def 
exportLiteral(self, outfile, level, name_='docHeadingType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.level is not None: - showIndent(outfile, level) - outfile.write('level = %s,\n' % (self.level,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('level'): - try: - self.level = int(attrs.get('level').value) - except ValueError, exp: - raise ValueError('Bad integer attribute (level): %s' % exp) - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docHeadingType - - -class docImageType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): - self.width = width - self.type_ = type_ - self.name = name - self.height = height - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docImageType.subclass: - return docImageType.subclass(*args_, **kwargs_) - else: - return docImageType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_width(self): return self.width - def set_width(self, width): self.width = width - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_height(self): return self.height - def set_height(self, height): self.height = height - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docImageType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'): - if self.width is not None: - outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) - if self.type_ is not None: - outfile.write(' type=%s' % (quote_attrib(self.type_), )) - if self.name is not None: - outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - if self.height is not None: - outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) - def exportChildren(self, outfile, level, 
namespace_='', name_='docImageType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docImageType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.width is not None: - showIndent(outfile, level) - outfile.write('width = %s,\n' % (self.width,)) - if self.type_ is not None: - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.name is not None: - showIndent(outfile, level) - outfile.write('name = %s,\n' % (self.name,)) - if self.height is not None: - showIndent(outfile, level) - outfile.write('height = %s,\n' % (self.height,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('width'): - self.width = attrs.get('width').value - if attrs.get('type'): - self.type_ = attrs.get('type').value - if attrs.get('name'): - self.name = attrs.get('name').value - if attrs.get('height'): - self.height = attrs.get('height').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docImageType - - -class docDotFileType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): - self.name = name - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docDotFileType.subclass: - return docDotFileType.subclass(*args_, **kwargs_) - else: - return docDotFileType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.name - def set_name(self, name): self.name = name - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docDotFileType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'): - if self.name is not None: - outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - def exportChildren(self, outfile, level, 
namespace_='', name_='docDotFileType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docDotFileType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.name is not None: - showIndent(outfile, level) - outfile.write('name = %s,\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('name'): - self.name = attrs.get('name').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docDotFileType - - -class docTocItemType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): - self.id = id - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docTocItemType.subclass: - return docTocItemType.subclass(*args_, **kwargs_) - else: - return docTocItemType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_id(self): return self.id - def set_id(self, id): self.id = id - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTocItemType') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docTocItemType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def 
exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docTocItemType - - -class docTocListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, tocitem=None): - if tocitem is None: - self.tocitem = [] - else: - self.tocitem = tocitem - def factory(*args_, **kwargs_): - if docTocListType.subclass: - return docTocListType.subclass(*args_, **kwargs_) - else: - return docTocListType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_tocitem(self): return self.tocitem - def set_tocitem(self, tocitem): self.tocitem = tocitem - def add_tocitem(self, value): self.tocitem.append(value) - def insert_tocitem(self, index, value): self.tocitem[index] = value - def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTocListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'): - for tocitem_ in self.tocitem: - tocitem_.export(outfile, level, namespace_, name_='tocitem') - def hasContent_(self): - if ( - self.tocitem is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docTocListType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('tocitem=[\n') - level += 1 - for tocitem in self.tocitem: - showIndent(outfile, level) - outfile.write('model_.tocitem(\n') - tocitem.exportLiteral(outfile, level, name_='tocitem') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 
'tocitem': - obj_ = docTocItemType.factory() - obj_.build(child_) - self.tocitem.append(obj_) -# end class docTocListType - - -class docLanguageType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, langid=None, para=None): - self.langid = langid - if para is None: - self.para = [] - else: - self.para = para - def factory(*args_, **kwargs_): - if docLanguageType.subclass: - return docLanguageType.subclass(*args_, **kwargs_) - else: - return docLanguageType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_langid(self): return self.langid - def set_langid(self, langid): self.langid = langid - def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docLanguageType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'): - if self.langid is not None: - outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - def hasContent_(self): - if ( - self.para is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docLanguageType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.langid is not None: - showIndent(outfile, level) - outfile.write('langid = %s,\n' % (self.langid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('para=[\n') - level += 1 - for para in self.para: - showIndent(outfile, level) - outfile.write('model_.para(\n') - para.exportLiteral(outfile, level, name_='para') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('langid'): - self.langid = attrs.get('langid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) -# end class docLanguageType - - -class docParamListType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, parameteritem=None): - self.kind = kind - if parameteritem is None: - self.parameteritem = [] - else: - self.parameteritem = parameteritem - def factory(*args_, **kwargs_): - if docParamListType.subclass: - return docParamListType.subclass(*args_, **kwargs_) - else: - return docParamListType(*args_, **kwargs_) 
- factory = staticmethod(factory) - def get_parameteritem(self): return self.parameteritem - def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem - def add_parameteritem(self, value): self.parameteritem.append(value) - def insert_parameteritem(self, index, value): self.parameteritem[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamListType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'): - if self.kind is not None: - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'): - for parameteritem_ in self.parameteritem: - parameteritem_.export(outfile, level, namespace_, name_='parameteritem') - def hasContent_(self): - if ( - self.parameteritem is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docParamListType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.kind is not None: - showIndent(outfile, level) - outfile.write('kind = "%s",\n' % (self.kind,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('parameteritem=[\n') - level += 1 - for parameteritem in self.parameteritem: - showIndent(outfile, level) - outfile.write('model_.parameteritem(\n') - parameteritem.exportLiteral(outfile, level, name_='parameteritem') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameteritem': - obj_ = docParamListItem.factory() - obj_.build(child_) - self.parameteritem.append(obj_) -# end class docParamListType - - -class docParamListItem(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, parameternamelist=None, parameterdescription=None): - if parameternamelist is None: - self.parameternamelist = [] - else: - self.parameternamelist = parameternamelist - self.parameterdescription = parameterdescription - def factory(*args_, **kwargs_): - if docParamListItem.subclass: - return docParamListItem.subclass(*args_, **kwargs_) - else: - return docParamListItem(*args_, **kwargs_) - factory = staticmethod(factory) - def get_parameternamelist(self): return self.parameternamelist - def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist - def add_parameternamelist(self, value): self.parameternamelist.append(value) - def insert_parameternamelist(self, 
index, value): self.parameternamelist[index] = value - def get_parameterdescription(self): return self.parameterdescription - def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription - def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamListItem') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'): - for parameternamelist_ in self.parameternamelist: - parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist') - if self.parameterdescription: - self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', ) - def hasContent_(self): - if ( - self.parameternamelist is not None or - self.parameterdescription is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docParamListItem'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('parameternamelist=[\n') - level += 1 - for parameternamelist in self.parameternamelist: - showIndent(outfile, level) - outfile.write('model_.parameternamelist(\n') - parameternamelist.exportLiteral(outfile, level, name_='parameternamelist') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.parameterdescription: - showIndent(outfile, level) - outfile.write('parameterdescription=model_.descriptionType(\n') - self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameternamelist': - obj_ = docParamNameList.factory() - obj_.build(child_) - self.parameternamelist.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameterdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_parameterdescription(obj_) -# end class docParamListItem - - -class docParamNameList(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, parametername=None): - if parametername is None: - self.parametername = [] - else: - self.parametername = parametername - def factory(*args_, **kwargs_): - if docParamNameList.subclass: - return docParamNameList.subclass(*args_, **kwargs_) - else: - return docParamNameList(*args_, **kwargs_) - factory = staticmethod(factory) - def get_parametername(self): return self.parametername - def set_parametername(self, parametername): 
self.parametername = parametername - def add_parametername(self, value): self.parametername.append(value) - def insert_parametername(self, index, value): self.parametername[index] = value - def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamNameList') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'): - for parametername_ in self.parametername: - parametername_.export(outfile, level, namespace_, name_='parametername') - def hasContent_(self): - if ( - self.parametername is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docParamNameList'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('parametername=[\n') - level += 1 - for parametername in self.parametername: - showIndent(outfile, level) - outfile.write('model_.parametername(\n') - parametername.exportLiteral(outfile, level, name_='parametername') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parametername': - obj_ = docParamName.factory() - obj_.build(child_) - self.parametername.append(obj_) -# end class docParamNameList - - -class docParamName(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): - self.direction = direction - if mixedclass_ is None: - self.mixedclass_ = MixedContainer - else: - self.mixedclass_ = mixedclass_ - if content_ is None: - self.content_ = [] - else: - self.content_ = content_ - def factory(*args_, **kwargs_): - if docParamName.subclass: - return docParamName.subclass(*args_, **kwargs_) - else: - return docParamName(*args_, **kwargs_) - factory = staticmethod(factory) - def get_ref(self): return self.ref - def set_ref(self, ref): self.ref = ref - def get_direction(self): return self.direction - def set_direction(self, direction): self.direction = direction - def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamName') - outfile.write('>') - self.exportChildren(outfile, level + 1, namespace_, name_) - outfile.write('\n' % (namespace_, name_)) - def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'): - if self.direction is not None: - 
outfile.write(' direction=%s' % (quote_attrib(self.direction), )) - def exportChildren(self, outfile, level, namespace_='', name_='docParamName'): - for item_ in self.content_: - item_.export(outfile, level, item_.name, namespace_) - def hasContent_(self): - if ( - self.ref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docParamName'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.direction is not None: - showIndent(outfile, level) - outfile.write('direction = "%s",\n' % (self.direction,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('content_ = [\n') - for item_ in self.content_: - item_.exportLiteral(outfile, level, name_) - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('direction'): - self.direction = attrs.get('direction').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': - childobj_ = docRefTextType.factory() - childobj_.build(child_) - obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) - self.content_.append(obj_) - elif child_.nodeType == Node.TEXT_NODE: - obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) - self.content_.append(obj_) -# end class docParamName - - -class docXRefSectType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, id=None, xreftitle=None, xrefdescription=None): - self.id = id - if xreftitle is None: - self.xreftitle = [] - else: - self.xreftitle = xreftitle - self.xrefdescription = xrefdescription - def factory(*args_, **kwargs_): - if docXRefSectType.subclass: - return docXRefSectType.subclass(*args_, **kwargs_) - else: - return docXRefSectType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_xreftitle(self): return self.xreftitle - def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle - def add_xreftitle(self, value): self.xreftitle.append(value) - def insert_xreftitle(self, index, value): self.xreftitle[index] = value - def get_xrefdescription(self): return self.xrefdescription - def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription - def get_id(self): return self.id - def set_id(self, id): self.id = id - def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'): - if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) - def exportChildren(self, outfile, level, namespace_='', 
name_='docXRefSectType'): - for xreftitle_ in self.xreftitle: - showIndent(outfile, level) - outfile.write('<%sxreftitle>%s\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) - if self.xrefdescription: - self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', ) - def hasContent_(self): - if ( - self.xreftitle is not None or - self.xrefdescription is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docXRefSectType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.id is not None: - showIndent(outfile, level) - outfile.write('id = %s,\n' % (self.id,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('xreftitle=[\n') - level += 1 - for xreftitle in self.xreftitle: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.xrefdescription: - showIndent(outfile, level) - outfile.write('xrefdescription=model_.descriptionType(\n') - self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('id'): - self.id = attrs.get('id').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'xreftitle': - xreftitle_ = '' - for text__content_ in child_.childNodes: - xreftitle_ += text__content_.nodeValue - self.xreftitle.append(xreftitle_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'xrefdescription': - obj_ = descriptionType.factory() - obj_.build(child_) - self.set_xrefdescription(obj_) -# end class docXRefSectType - - -class docCopyType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, link=None, para=None, sect1=None, internal=None): - self.link = link - if para is None: - self.para = [] - else: - self.para = para - if sect1 is None: - self.sect1 = [] - else: - self.sect1 = sect1 - self.internal = internal - def factory(*args_, **kwargs_): - if docCopyType.subclass: - return docCopyType.subclass(*args_, **kwargs_) - else: - return docCopyType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_para(self): return self.para - def set_para(self, para): self.para = para - def add_para(self, value): self.para.append(value) - def insert_para(self, index, value): self.para[index] = value - def get_sect1(self): return self.sect1 - def set_sect1(self, sect1): self.sect1 = sect1 - def add_sect1(self, value): self.sect1.append(value) - def insert_sect1(self, index, value): self.sect1[index] = value - def get_internal(self): return self.internal - def set_internal(self, internal): self.internal = internal - def get_link(self): return self.link - def set_link(self, link): self.link = link - def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, 
level, namespace_, name_='docCopyType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'): - if self.link is not None: - outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), )) - def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): - for para_ in self.para: - para_.export(outfile, level, namespace_, name_='para') - for sect1_ in self.sect1: - sect1_.export(outfile, level, namespace_, name_='sect1') - if self.internal: - self.internal.export(outfile, level, namespace_, name_='internal') - def hasContent_(self): - if ( - self.para is not None or - self.sect1 is not None or - self.internal is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docCopyType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.link is not None: - showIndent(outfile, level) - outfile.write('link = %s,\n' % (self.link,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('para=[\n') - level += 1 - for para in self.para: - showIndent(outfile, level) - outfile.write('model_.para(\n') - para.exportLiteral(outfile, level, name_='para') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('sect1=[\n') - level += 1 - for sect1 in self.sect1: - showIndent(outfile, level) - outfile.write('model_.sect1(\n') - sect1.exportLiteral(outfile, level, name_='sect1') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.internal: - showIndent(outfile, level) - outfile.write('internal=model_.docInternalType(\n') - self.internal.exportLiteral(outfile, level, name_='internal') - showIndent(outfile, level) - outfile.write('),\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('link'): - self.link = attrs.get('link').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': - obj_ = docParaType.factory() - obj_.build(child_) - self.para.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': - obj_ = docSect1Type.factory() - obj_.build(child_) - self.sect1.append(obj_) - elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': - obj_ = docInternalType.factory() - obj_.build(child_) - self.set_internal(obj_) -# end class docCopyType - - -class docCharType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, char=None, valueOf_=''): - self.char = char - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if docCharType.subclass: - return docCharType.subclass(*args_, **kwargs_) - else: - return docCharType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_char(self): return self.char - def set_char(self, 
char): self.char = char - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docCharType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'): - if self.char is not None: - outfile.write(' char=%s' % (quote_attrib(self.char), )) - def exportChildren(self, outfile, level, namespace_='', name_='docCharType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( - self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docCharType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.char is not None: - showIndent(outfile, level) - outfile.write('char = "%s",\n' % (self.char,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('char'): - self.char = attrs.get('char').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docCharType - - -class docEmptyType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, valueOf_=''): - self.valueOf_ = valueOf_ - def factory(*args_, **kwargs_): - if docEmptyType.subclass: - return docEmptyType.subclass(*args_, **kwargs_) - else: - return docEmptyType(*args_, **kwargs_) - factory = staticmethod(factory) - def getValueOf_(self): return self.valueOf_ - def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ - def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docEmptyType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'): - pass - def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') - outfile.write(value) - else: - outfile.write(quote_xml('%s' % self.valueOf_)) - def hasContent_(self): - if ( 
- self.valueOf_ is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='docEmptyType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - self.valueOf_ = '' - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - pass - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.TEXT_NODE: - self.valueOf_ += child_.nodeValue - elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' -# end class docEmptyType - - -USAGE_TEXT = """ -Usage: python .py [ -s ] -Options: - -s Use the SAX parser, not the minidom parser. -""" - -def usage(): - print USAGE_TEXT - sys.exit(1) - - -def parse(inFileName): - doc = minidom.parse(inFileName) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('\n') - rootObj.export(sys.stdout, 0, name_="doxygen", - namespacedef_='') - return rootObj - - -def parseString(inString): - doc = minidom.parseString(inString) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('\n') - rootObj.export(sys.stdout, 0, name_="doxygen", - namespacedef_='') - return rootObj - - -def parseLiteral(inFileName): - doc = minidom.parse(inFileName) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('from compound import *\n\n') - sys.stdout.write('rootObj = doxygen(\n') - rootObj.exportLiteral(sys.stdout, 0, name_="doxygen") - sys.stdout.write(')\n') - return rootObj - - -def main(): - args = sys.argv[1:] - if len(args) == 1: - parse(args[0]) - else: - usage() - - -if __name__ == '__main__': - main() - #import pdb - #pdb.run('main()') - diff --git a/docs/doxygen/doxyxml/generated/index.py b/docs/doxygen/doxyxml/generated/index.py deleted file mode 100644 index 7a70e14..0000000 --- a/docs/doxygen/doxyxml/generated/index.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python - -""" -Generated Mon Feb 9 19:08:05 2009 by generateDS.py. 
-""" - -from xml.dom import minidom - -import os -import sys -import compound - -import indexsuper as supermod - -class DoxygenTypeSub(supermod.DoxygenType): - def __init__(self, version=None, compound=None): - supermod.DoxygenType.__init__(self, version, compound) - - def find_compounds_and_members(self, details): - """ - Returns a list of all compounds and their members which match details - """ - - results = [] - for compound in self.compound: - members = compound.find_members(details) - if members: - results.append([compound, members]) - else: - if details.match(compound): - results.append([compound, []]) - - return results - -supermod.DoxygenType.subclass = DoxygenTypeSub -# end class DoxygenTypeSub - - -class CompoundTypeSub(supermod.CompoundType): - def __init__(self, kind=None, refid=None, name='', member=None): - supermod.CompoundType.__init__(self, kind, refid, name, member) - - def find_members(self, details): - """ - Returns a list of all members which match details - """ - - results = [] - - for member in self.member: - if details.match(member): - results.append(member) - - return results - -supermod.CompoundType.subclass = CompoundTypeSub -# end class CompoundTypeSub - - -class MemberTypeSub(supermod.MemberType): - - def __init__(self, kind=None, refid=None, name=''): - supermod.MemberType.__init__(self, kind, refid, name) - -supermod.MemberType.subclass = MemberTypeSub -# end class MemberTypeSub - - -def parse(inFilename): - - doc = minidom.parse(inFilename) - rootNode = doc.documentElement - rootObj = supermod.DoxygenType.factory() - rootObj.build(rootNode) - - return rootObj - diff --git a/docs/doxygen/doxyxml/generated/indexsuper.py b/docs/doxygen/doxyxml/generated/indexsuper.py deleted file mode 100644 index a991530..0000000 --- a/docs/doxygen/doxyxml/generated/indexsuper.py +++ /dev/null @@ -1,523 +0,0 @@ -#!/usr/bin/env python - -# -# Generated Thu Jun 11 18:43:54 2009 by generateDS.py. -# - -import sys -import getopt -from string import lower as str_lower -from xml.dom import minidom -from xml.dom import Node - -# -# User methods -# -# Calls to the methods in these classes are generated by generateDS.py. -# You can replace these methods by re-implementing the following class -# in a module named generatedssuper.py. - -try: - from generatedssuper import GeneratedsSuper -except ImportError, exp: - - class GeneratedsSuper: - def format_string(self, input_data, input_name=''): - return input_data - def format_integer(self, input_data, input_name=''): - return '%d' % input_data - def format_float(self, input_data, input_name=''): - return '%f' % input_data - def format_double(self, input_data, input_name=''): - return '%e' % input_data - def format_boolean(self, input_data, input_name=''): - return '%s' % input_data - - -# -# If you have installed IPython you can uncomment and use the following. -# IPython is available from http://ipython.scipy.org/. -# - -## from IPython.Shell import IPShellEmbed -## args = '' -## ipshell = IPShellEmbed(args, -## banner = 'Dropping into IPython', -## exit_msg = 'Leaving Interpreter, back to program.') - -# Then use the following line where and when you want to drop into the -# IPython shell: -# ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit') - -# -# Globals -# - -ExternalEncoding = 'ascii' - -# -# Support/utility functions. 
-#
-
-def showIndent(outfile, level):
-    for idx in range(level):
-        outfile.write('    ')
-
-def quote_xml(inStr):
-    s1 = (isinstance(inStr, basestring) and inStr or
-          '%s' % inStr)
-    s1 = s1.replace('&', '&amp;')
-    s1 = s1.replace('<', '&lt;')
-    s1 = s1.replace('>', '&gt;')
-    return s1
-
-def quote_attrib(inStr):
-    s1 = (isinstance(inStr, basestring) and inStr or
-          '%s' % inStr)
-    s1 = s1.replace('&', '&amp;')
-    s1 = s1.replace('<', '&lt;')
-    s1 = s1.replace('>', '&gt;')
-    if '"' in s1:
-        if "'" in s1:
-            s1 = '"%s"' % s1.replace('"', "&quot;")
-        else:
-            s1 = "'%s'" % s1
-    else:
-        s1 = '"%s"' % s1
-    return s1
-
-def quote_python(inStr):
-    s1 = inStr
-    if s1.find("'") == -1:
-        if s1.find('\n') == -1:
-            return "'%s'" % s1
-        else:
-            return "'''%s'''" % s1
-    else:
-        if s1.find('"') != -1:
-            s1 = s1.replace('"', '\\"')
-        if s1.find('\n') == -1:
-            return '"%s"' % s1
-        else:
-            return '"""%s"""' % s1
-
-
-class MixedContainer:
-    # Constants for category:
-    CategoryNone = 0
-    CategoryText = 1
-    CategorySimple = 2
-    CategoryComplex = 3
-    # Constants for content_type:
-    TypeNone = 0
-    TypeText = 1
-    TypeString = 2
-    TypeInteger = 3
-    TypeFloat = 4
-    TypeDecimal = 5
-    TypeDouble = 6
-    TypeBoolean = 7
-    def __init__(self, category, content_type, name, value):
-        self.category = category
-        self.content_type = content_type
-        self.name = name
-        self.value = value
-    def getCategory(self):
-        return self.category
-    def getContenttype(self, content_type):
-        return self.content_type
-    def getValue(self):
-        return self.value
-    def getName(self):
-        return self.name
-    def export(self, outfile, level, name, namespace):
-        if self.category == MixedContainer.CategoryText:
-            outfile.write(self.value)
-        elif self.category == MixedContainer.CategorySimple:
-            self.exportSimple(outfile, level, name)
-        else: # category == MixedContainer.CategoryComplex
-            self.value.export(outfile, level, namespace,name)
-    def exportSimple(self, outfile, level, name):
-        if self.content_type == MixedContainer.TypeString:
-            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
-        elif self.content_type == MixedContainer.TypeInteger or \
-            self.content_type == MixedContainer.TypeBoolean:
-            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
-        elif self.content_type == MixedContainer.TypeFloat or \
-            self.content_type == MixedContainer.TypeDecimal:
-            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
-        elif self.content_type == MixedContainer.TypeDouble:
-            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
-    def exportLiteral(self, outfile, level, name):
-        if self.category == MixedContainer.CategoryText:
-            showIndent(outfile, level)
-            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
-                (self.category, self.content_type, self.name, self.value))
-        elif self.category == MixedContainer.CategorySimple:
-            showIndent(outfile, level)
-            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
-                (self.category, self.content_type, self.name, self.value))
-        else: # category == MixedContainer.CategoryComplex
-            showIndent(outfile, level)
-            outfile.write('MixedContainer(%d, %d, "%s",\n' % \
-                (self.category, self.content_type, self.name,))
-            self.value.exportLiteral(outfile, level + 1)
-            showIndent(outfile, level)
-            outfile.write(')\n')
-
-
-class _MemberSpec(object):
-    def __init__(self, name='', data_type='', container=0):
-        self.name = name
-        self.data_type = data_type
-        self.container = container
-    def set_name(self, name): self.name = name
-    def get_name(self): return self.name
-    def set_data_type(self, data_type): self.data_type =
data_type - def get_data_type(self): return self.data_type - def set_container(self, container): self.container = container - def get_container(self): return self.container - - -# -# Data representation classes. -# - -class DoxygenType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, version=None, compound=None): - self.version = version - if compound is None: - self.compound = [] - else: - self.compound = compound - def factory(*args_, **kwargs_): - if DoxygenType.subclass: - return DoxygenType.subclass(*args_, **kwargs_) - else: - return DoxygenType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_compound(self): return self.compound - def set_compound(self, compound): self.compound = compound - def add_compound(self, value): self.compound.append(value) - def insert_compound(self, index, value): self.compound[index] = value - def get_version(self): return self.version - def set_version(self, version): self.version = version - def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): - outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) - def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): - for compound_ in self.compound: - compound_.export(outfile, level, namespace_, name_='compound') - def hasContent_(self): - if ( - self.compound is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='DoxygenType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.version is not None: - showIndent(outfile, level) - outfile.write('version = %s,\n' % (self.version,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('compound=[\n') - level += 1 - for compound in self.compound: - showIndent(outfile, level) - outfile.write('model_.compound(\n') - compound.exportLiteral(outfile, level, name_='compound') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('version'): - self.version = attrs.get('version').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compound': - obj_ = CompoundType.factory() - obj_.build(child_) - self.compound.append(obj_) -# end class DoxygenType - - -class CompoundType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, refid=None, name=None, member=None): - self.kind = kind - self.refid = refid - self.name = name - if member is None: - self.member = [] - else: - self.member = 
member - def factory(*args_, **kwargs_): - if CompoundType.subclass: - return CompoundType.subclass(*args_, **kwargs_) - else: - return CompoundType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_member(self): return self.member - def set_member(self, member): self.member = member - def add_member(self, value): self.member.append(value) - def insert_member(self, index, value): self.member[index] = value - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='CompoundType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'): - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'): - if self.name is not None: - showIndent(outfile, level) - outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) - for member_ in self.member: - member_.export(outfile, level, namespace_, name_='member') - def hasContent_(self): - if ( - self.name is not None or - self.member is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='CompoundType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.kind is not None: - showIndent(outfile, level) - outfile.write('kind = "%s",\n' % (self.kind,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('member=[\n') - level += 1 - for member in self.member: - showIndent(outfile, level) - outfile.write('model_.member(\n') - member.exportLiteral(outfile, level, name_='member') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - name_ = '' - for text__content_ in child_.childNodes: - name_ += text__content_.nodeValue - self.name = name_ - elif child_.nodeType == Node.ELEMENT_NODE 
and \ - nodeName_ == 'member': - obj_ = MemberType.factory() - obj_.build(child_) - self.member.append(obj_) -# end class CompoundType - - -class MemberType(GeneratedsSuper): - subclass = None - superclass = None - def __init__(self, kind=None, refid=None, name=None): - self.kind = kind - self.refid = refid - self.name = name - def factory(*args_, **kwargs_): - if MemberType.subclass: - return MemberType.subclass(*args_, **kwargs_) - else: - return MemberType(*args_, **kwargs_) - factory = staticmethod(factory) - def get_name(self): return self.name - def set_name(self, name): self.name = name - def get_kind(self): return self.kind - def set_kind(self, kind): self.kind = kind - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''): - showIndent(outfile, level) - outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='MemberType') - if self.hasContent_(): - outfile.write('>\n') - self.exportChildren(outfile, level + 1, namespace_, name_) - showIndent(outfile, level) - outfile.write('\n' % (namespace_, name_)) - else: - outfile.write(' />\n') - def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'): - outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='MemberType'): - if self.name is not None: - showIndent(outfile, level) - outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) - def hasContent_(self): - if ( - self.name is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='MemberType'): - level += 1 - self.exportLiteralAttributes(outfile, level, name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, name_): - if self.kind is not None: - showIndent(outfile, level) - outfile.write('kind = "%s",\n' % (self.kind,)) - if self.refid is not None: - showIndent(outfile, level) - outfile.write('refid = %s,\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) - def build(self, node_): - attrs = node_.attributes - self.buildAttributes(attrs) - for child_ in node_.childNodes: - nodeName_ = child_.nodeName.split(':')[-1] - self.buildChildren(child_, nodeName_) - def buildAttributes(self, attrs): - if attrs.get('kind'): - self.kind = attrs.get('kind').value - if attrs.get('refid'): - self.refid = attrs.get('refid').value - def buildChildren(self, child_, nodeName_): - if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': - name_ = '' - for text__content_ in child_.childNodes: - name_ += text__content_.nodeValue - self.name = name_ -# end class MemberType - - -USAGE_TEXT = """ -Usage: python .py [ -s ] -Options: - -s Use the SAX parser, not the minidom parser. -""" - -def usage(): - print USAGE_TEXT - sys.exit(1) - - -def parse(inFileName): - doc = minidom.parse(inFileName) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. 
- doc = None - sys.stdout.write('\n') - rootObj.export(sys.stdout, 0, name_="doxygenindex", - namespacedef_='') - return rootObj - - -def parseString(inString): - doc = minidom.parseString(inString) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('\n') - rootObj.export(sys.stdout, 0, name_="doxygenindex", - namespacedef_='') - return rootObj - - -def parseLiteral(inFileName): - doc = minidom.parse(inFileName) - rootNode = doc.documentElement - rootObj = DoxygenType.factory() - rootObj.build(rootNode) - # Enable Python to collect the space used by the DOM. - doc = None - sys.stdout.write('from index import *\n\n') - sys.stdout.write('rootObj = doxygenindex(\n') - rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex") - sys.stdout.write(')\n') - return rootObj - - -def main(): - args = sys.argv[1:] - if len(args) == 1: - parse(args[0]) - else: - usage() - - - - -if __name__ == '__main__': - main() - #import pdb - #pdb.run('main()') - diff --git a/docs/doxygen/doxyxml/text.py b/docs/doxygen/doxyxml/text.py deleted file mode 100644 index 4a79d0a..0000000 --- a/docs/doxygen/doxyxml/text.py +++ /dev/null @@ -1,53 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' - -""" -Utilities for extracting text from generated classes. -""" - -def is_string(txt): - if isinstance(txt, str): - return True - try: - if isinstance(txt, unicode): - return True - except NameError: - pass - return False - -def description(obj): - if obj is None: - return None - return description_bit(obj).strip() - -def description_bit(obj): - if hasattr(obj, 'content'): - contents = [description_bit(item) for item in obj.content] - result = ''.join(contents) - elif hasattr(obj, 'content_'): - contents = [description_bit(item) for item in obj.content_] - result = ''.join(contents) - elif hasattr(obj, 'value'): - result = description_bit(obj.value) - elif is_string(obj): - return obj - else: - raise StandardError('Expecting a string or something with content, content_ or value attribute') - # If this bit is a paragraph then add one some line breaks. - if hasattr(obj, 'name') and obj.name == 'para': - result += "\n\n" - return result diff --git a/docs/doxygen/other/group_defs.dox b/docs/doxygen/other/group_defs.dox deleted file mode 100644 index c404d61..0000000 --- a/docs/doxygen/other/group_defs.dox +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * \defgroup block GNU Radio NORDIC C++ Signal Processing Blocks - * \brief All C++ blocks that can be used from the NORDIC GNU Radio - * module are listed here or in the subcategories below. - * - */ - diff --git a/docs/doxygen/other/main_page.dox b/docs/doxygen/other/main_page.dox deleted file mode 100644 index 34a94f1..0000000 --- a/docs/doxygen/other/main_page.dox +++ /dev/null @@ -1,10 +0,0 @@ -/*! 
\mainpage - -Welcome to the GNU Radio NORDIC Block - -This is the intro page for the Doxygen manual generated for the NORDIC -block (docs/doxygen/other/main_page.dox). Edit it to add more detailed -documentation about the new GNU Radio modules contained in this -project. - -*/ diff --git a/docs/doxygen/swig_doc.py b/docs/doxygen/swig_doc.py deleted file mode 100644 index 0d4af35..0000000 --- a/docs/doxygen/swig_doc.py +++ /dev/null @@ -1,252 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' - -""" -Creates the swig_doc.i SWIG interface file. -Execute using: python swig_doc.py xml_path outputfilename - -The file instructs SWIG to transfer the doxygen comments into the -python docstrings. - -""" - -import sys - -try: - from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base -except ImportError: - from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base - - -def py_name(name): - bits = name.split('_') - return '_'.join(bits[1:]) - -def make_name(name): - bits = name.split('_') - return bits[0] + '_make_' + '_'.join(bits[1:]) - - -class Block(object): - """ - Checks if doxyxml produced objects correspond to a gnuradio block. - """ - - @classmethod - def includes(cls, item): - if not isinstance(item, DoxyClass): - return False - # Check for a parsing error. - if item.error(): - return False - return item.has_member(make_name(item.name()), DoxyFriend) - - -def utoascii(text): - """ - Convert unicode text into ascii and escape quotes. - """ - if text is None: - return '' - out = text.encode('ascii', 'replace') - out = out.replace('"', '\\"') - return out - - -def combine_descriptions(obj): - """ - Combines the brief and detailed descriptions of an object together. - """ - description = [] - bd = obj.brief_description.strip() - dd = obj.detailed_description.strip() - if bd: - description.append(bd) - if dd: - description.append(dd) - return utoascii('\n\n'.join(description)).strip() - - -entry_templ = '%feature("docstring") {name} "{docstring}"' -def make_entry(obj, name=None, templ="{description}", description=None): - """ - Create a docstring entry for a swig interface file. - - obj - a doxyxml object from which documentation will be extracted. - name - the name of the C object (defaults to obj.name()) - templ - an optional template for the docstring containing only one - variable named 'description'. - description - if this optional variable is set then it's value is - used as the description instead of extracting it from obj. 
- """ - if name is None: - name=obj.name() - if "operator " in name: - return '' - if description is None: - description = combine_descriptions(obj) - docstring = templ.format(description=description) - if not docstring: - return '' - return entry_templ.format( - name=name, - docstring=docstring, - ) - - -def make_func_entry(func, name=None, description=None, params=None): - """ - Create a function docstring entry for a swig interface file. - - func - a doxyxml object from which documentation will be extracted. - name - the name of the C object (defaults to func.name()) - description - if this optional variable is set then it's value is - used as the description instead of extracting it from func. - params - a parameter list that overrides using func.params. - """ - if params is None: - params = func.params - params = [prm.declname for prm in params] - if params: - sig = "Params: (%s)" % ", ".join(params) - else: - sig = "Params: (NONE)" - templ = "{description}\n\n" + sig - return make_entry(func, name=name, templ=utoascii(templ), - description=description) - - -def make_class_entry(klass, description=None): - """ - Create a class docstring for a swig interface file. - """ - output = [] - output.append(make_entry(klass, description=description)) - for func in klass.in_category(DoxyFunction): - name = klass.name() + '::' + func.name() - output.append(make_func_entry(func, name=name)) - return "\n\n".join(output) - - -def make_block_entry(di, block): - """ - Create class and function docstrings of a gnuradio block for a - swig interface file. - """ - descriptions = [] - # Get the documentation associated with the class. - class_desc = combine_descriptions(block) - if class_desc: - descriptions.append(class_desc) - # Get the documentation associated with the make function - make_func = di.get_member(make_name(block.name()), DoxyFunction) - make_func_desc = combine_descriptions(make_func) - if make_func_desc: - descriptions.append(make_func_desc) - # Get the documentation associated with the file - try: - block_file = di.get_member(block.name() + ".h", DoxyFile) - file_desc = combine_descriptions(block_file) - if file_desc: - descriptions.append(file_desc) - except base.Base.NoSuchMember: - # Don't worry if we can't find a matching file. - pass - # And join them all together to make a super duper description. - super_description = "\n\n".join(descriptions) - # Associate the combined description with the class and - # the make function. - output = [] - output.append(make_class_entry(block, description=super_description)) - creator = block.get_member(block.name(), DoxyFunction) - output.append(make_func_entry(make_func, description=super_description, - params=creator.params)) - return "\n\n".join(output) - - -def make_swig_interface_file(di, swigdocfilename, custom_output=None): - - output = [""" -/* - * This file was automatically generated using swig_doc.py. - * - * Any changes to it will be lost next time it is regenerated. - */ -"""] - - if custom_output is not None: - output.append(custom_output) - - # Create docstrings for the blocks. - blocks = di.in_category(Block) - make_funcs = set([]) - for block in blocks: - try: - make_func = di.get_member(make_name(block.name()), DoxyFunction) - make_funcs.add(make_func.name()) - output.append(make_block_entry(di, block)) - except block.ParsingError: - print('Parsing error for block %s' % block.name()) - - # Create docstrings for functions - # Don't include the make functions since they have already been dealt with. 
- funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs] - for f in funcs: - try: - output.append(make_func_entry(f)) - except f.ParsingError: - print('Parsing error for function %s' % f.name()) - - # Create docstrings for classes - block_names = [block.name() for block in blocks] - klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names] - for k in klasses: - try: - output.append(make_class_entry(k)) - except k.ParsingError: - print('Parsing error for class %s' % k.name()) - - # Docstrings are not created for anything that is not a function or a class. - # If this excludes anything important please add it here. - - output = "\n\n".join(output) - - swig_doc = file(swigdocfilename, 'w') - swig_doc.write(output) - swig_doc.close() - -if __name__ == "__main__": - # Parse command line options and set up doxyxml. - err_msg = "Execute using: python swig_doc.py xml_path outputfilename" - if len(sys.argv) != 3: - raise StandardError(err_msg) - xml_path = sys.argv[1] - swigdocfilename = sys.argv[2] - di = DoxyIndex(xml_path) - - # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined! - # This is presumably a bug in SWIG. - #msg_q = di.get_member(u'gr_msg_queue', DoxyClass) - #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction) - #delete_head = msg_q.get_member(u'delete_head', DoxyFunction) - output = [] - #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail')) - #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head')) - custom_output = "\n\n".join(output) - - # Generate the docstrings interface file. - make_swig_interface_file(di, swigdocfilename, custom_output=custom_output) diff --git a/examples/microsoft_mouse_sniffer.py b/examples/microsoft_mouse_sniffer.py deleted file mode 100755 index 8b18aca..0000000 --- a/examples/microsoft_mouse_sniffer.py +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env python2 - -from gnuradio import gr, blocks, digital, filter -from gnuradio.filter import firdes -import thread -import osmosdr -import nordic -import pmt -import struct -import time - - -class top_block(gr.top_block): - - def __init__(self): - gr.top_block.__init__(self, "Microsoft Mouse Sniffer") - - # SDR configuration - self.freq = 2403e6 - self.gain = 70 - self.symbol_rate = 2e6 - self.sample_rate = 4e6 - - # SDR source (gr-osmosdr source) - self.osmosdr_source = osmosdr.source() - self.osmosdr_source.set_sample_rate(self.sample_rate) - self.osmosdr_source.set_center_freq(self.freq) - self.osmosdr_source.set_gain(self.gain) - self.osmosdr_source.set_antenna('TX/RX') - - # Low pass filter - self.lpf = filter.fir_filter_ccf( - 1, firdes.low_pass_2(1, self.sample_rate, self.symbol_rate / 2, 50e3, 50)) - - # GFSK demod, defaults to 2 samples per symbol - self.gfsk_demod = digital.gfsk_demod() - - # Nordic RX - self.nordic_rx = nordic.nordic_rx(3, 5, 2, 2) - - # Connect the blocks - self.connect((self.osmosdr_source, 0), (self.lpf, 0)) - self.connect((self.lpf, 0), (self.gfsk_demod, 0)) - self.connect((self.gfsk_demod, 0), (self.nordic_rx, 0)) - - # Handle incoming packets - self.nordictap_handler = microsoft_nordictap_handler(self) - self.msg_connect( - self.nordic_rx, "nordictap_out", self.nordictap_handler, "nordictap_in") - - # Tune the USRP by nRF24L channel number - def set_channel(self, channel): - - new_channel = 2400e6 + channel * 1e6 - self.osmosdr_source.set_center_freq(2400e6 + channel * 1e6) - self.nordic_rx.set_channel(channel) - - -# Microsoft 
mouse nordictap handler -class microsoft_nordictap_handler(gr.sync_block): - - def __init__(self, tb): - gr.sync_block.__init__( - self, name="Nordictap Handler", in_sig=None, out_sig=None) - - self.tb = tb - self.message_port_register_in(pmt.intern("nordictap_in")) - self.set_msg_handler( - pmt.intern("nordictap_in"), self.nordictap_handler) - - # Tick / channel hopping state and logic - self.last_rx = time.time() - self.last_tune = time.time() - self.ch_timeout = 0.4 # timeout a channel after 200ms - self.last_ch = 0 - thread.start_new_thread(self.tick, ()) - - # Channels and channel groups - self.channels = [3, 29, 21, 5, 23, 17, 19, 50, 31, 25, - 46, 27, 78, 70, 72, 44, 56, 48, 68, 80, 54, 52, 74, 76] - self.channel_groups = [] - for x in range(6): - chs = [] - for y in range(4): - chs.append(self.channels[y * 6 + x]) - self.channel_groups.append(chs) - - # Discovered device state - self.mouse_address = None - - # 10ms tick - def tick(self): - - while True: - - # Check for a stale channel - if ((time.time() - self.last_rx) > self.ch_timeout * 5) and \ - ((time.time() - self.last_tune) > self.ch_timeout): - - self.last_ch += 1 - if self.last_ch >= len(self.channels): - self.last_ch = 0 - print 'Tuning to 24%02i MHz' % self.channels[self.last_ch] - self.last_tune = time.time() - self.tb.set_channel(self.channels[self.last_ch]) - - # Wait 10ms - time.sleep(0.01) - - def nordictap_handler(self, msg): - - data = pmt.to_python(msg).tostring() - - # Unpack the header - values = struct.unpack('BBBBBBB', data[0:7]) - channel = values[0] - data_rate = values[1] - address_length = values[2] - payload_length = values[3] - sequence_number = values[4] - no_ack = values[5] - crc_length = values[6] - - # Parse the address, payload, and crc - address = data[7:7 + address_length] - payload = data[7 + address_length:7 + address_length + payload_length] - crc = data[7 + address_length + payload_length: - 7 + address_length + payload_length + crc_length] - - # Check for a Microsoft mouse - if self.mouse_address is None: - if ((ord(address[0]) & 0xF0) == 0xA0) and \ - ((ord(address[4]) & 0x0F) == 0x06) and \ - payload_length == 19: - - # Set the mouse address - self.mouse_address = address - - # Set the channel group - for x in range(6): - if channel in self.channel_groups[x]: - self.channels = self.channel_groups[x] - break - - # Camp on the channel and print out the packet if this is our target - # device - if self.mouse_address == address: - - self.last_rx = time.time() - - # Print the channel, sequence number, address and payload - print 'CH=' + str(2400 + channel), - print 'SEQ=' + str(sequence_number), - print 'ADDR=' + ':'.join('%02X' % ord(b) for b in address), - print 'PLD=' + ':'.join('%02X' % ord(b) for b in payload), - print 'CRC=' + ':'.join('%02X' % ord(b) for b in crc) - - -def main(): - tb = top_block() - tb.start() - try: - raw_input('Press Enter to quit: ') - except EOFError: - pass - tb.stop() - tb.wait() - - -if __name__ == '__main__': - main() diff --git a/examples/nordic_auto_ack.py b/examples/nordic_auto_ack.py deleted file mode 100755 index c2b77c6..0000000 --- a/examples/nordic_auto_ack.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python2 - -from gnuradio import gr, blocks, digital, filter -from gnuradio.filter import firdes -import thread -import nordic -import pmt -import struct -import time -import numpy -import array -import osmosdr -import argparse -from bitstring import BitArray -from gnuradio import uhd -from Queue import Queue - - -class top_block(gr.top_block): - - 
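
Each handler above decodes the same flat nordictap message: a 7-byte header (channel, data rate index, address length, payload length, sequence number, no-ACK flag, CRC length) followed by the address, payload and CRC bytes. A minimal standalone sketch of that parsing, assuming a Python 3 bytes object and the field order used in the handlers:

    import struct

    def parse_nordictap(data):
        # Unpack the 7 header octets in the order used by the handlers above.
        (channel, data_rate, address_length, payload_length,
         sequence_number, no_ack, crc_length) = struct.unpack('BBBBBBB', data[:7])
        # The address, payload and CRC follow the header back to back.
        address = data[7:7 + address_length]
        payload = data[7 + address_length:7 + address_length + payload_length]
        crc = data[7 + address_length + payload_length:
                   7 + address_length + payload_length + crc_length]
        return channel, sequence_number, no_ack, address, payload, crc
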
def __init__(self, args): - gr.top_block.__init__(self, "Nordic Auto-ACK Example") - - # SDR configuration - self.freq = 2400e6 + args.channel * 1e6 - self.gain = args.gain - self.symbol_rate = args.data_rate - self.sample_rate = args.data_rate * args.samples_per_symbol - - # SDR source (gr-osmosdr source) - self.osmosdr_source = osmosdr.source() - self.osmosdr_source.set_sample_rate(self.sample_rate) - self.osmosdr_source.set_center_freq(self.freq) - self.osmosdr_source.set_gain(self.gain) - self.osmosdr_source.set_antenna('TX/RX') - - # SDR sink (gr-osmosdr source) - self.osmosdr_sink = osmosdr.sink() - self.osmosdr_sink.set_sample_rate(self.sample_rate) - self.osmosdr_sink.set_center_freq(self.freq) - self.osmosdr_sink.set_gain(self.gain) - self.osmosdr_sink.set_antenna('TX/RX') - - # Transmit chain - self.tx = nordic.nordic_tx() - self.gfsk_mod = digital.gfsk_mod( - samples_per_symbol=args.samples_per_symbol) - self.connect(self.tx, self.gfsk_mod) - self.connect(self.gfsk_mod, self.osmosdr_sink) - - # Receive chain - dr = 0 - if args.data_rate == 1e6: - dr = 1 - elif args.data_rate == 2e6: - dr = 2 - self.rx = nordic.nordic_rx( - args.channel, args.address_length, args.crc_length, dr) - self.gfsk_demod = digital.gfsk_demod( - samples_per_symbol=args.samples_per_symbol) - self.lpf = filter.fir_filter_ccf( - 1, firdes.low_pass_2(1, self.sample_rate, self.symbol_rate / 2, 50e3, 50)) - self.connect(self.osmosdr_source, self.lpf) - self.connect(self.lpf, self.gfsk_demod) - self.connect(self.gfsk_demod, self.rx) - - # Handle incoming packets - self.nordictap_ack_handler = nordictap_ack_handler() - self.msg_connect(self.rx, "nordictap_out", - self.nordictap_ack_handler, "nordictap_in") - - # Reply with ACKs - self.msg_connect(self.nordictap_ack_handler, - "nordictap_out", self.tx, "nordictap_in") - - -# Nordic Auto-ACK handler -class nordictap_ack_handler(gr.sync_block): - - # Constructor - - def __init__(self): - gr.sync_block.__init__( - self, name="Nordictap Handler", in_sig=None, out_sig=None) - - # Received packet input port - self.message_port_register_in(pmt.intern("nordictap_in")) - self.set_msg_handler( - pmt.intern("nordictap_in"), self.nordictap_handler) - - # ACK output port - self.message_port_register_out(pmt.intern("nordictap_out")) - - # Handle incoming packets, and reply with ACKs - def nordictap_handler(self, msg): - - # PMT to byte string - data = pmt.to_python(msg).tostring() - - # Unpack the header - values = struct.unpack('BBBBBBBB', data[0:8]) - channel = values[0] - data_rate = values[1] - address_length = values[2] - payload_length = values[3] - sequence_number = values[4] - no_ack = values[5] - crc_length = values[6] - - # Parse the address, payload, and crc - address = data[7:7 + address_length] - payload = data[7 + address_length:7 + address_length + payload_length] - crc = data[7 + address_length + payload_length: - 7 + address_length + payload_length + crc_length] - - # ACK if needed - if payload_length > 0 and no_ack == 0: - - # Print the channel, sequence number, address and payload - print "ACK'd Packet: ", - print 'CH=' + str(2400 + channel), - print 'SEQ=' + str(sequence_number), - print 'ADDR=' + ':'.join('%02X' % ord(b) for b in address), - print 'PLD=' + ':'.join('%02X' % ord(b) for b in payload), - print 'CRC=' + ':'.join('%02X' % ord(b) for b in crc) - - # Build an ACK - nordictap = [0] + [4, 2, 5, 0, sequence_number, 0, 2] - for c in address: - nordictap.append(ord(c)) - - # Transmit an ACK - vec = pmt.make_u8vector(len(nordictap), 0) - for x in 
range(len(nordictap)): - pmt.u8vector_set(vec, x, nordictap[x]) - self.message_port_pub(pmt.intern("nordictap_out"), vec) - - -def main(): - - # Parse command line arguments - parser = argparse.ArgumentParser('Nordic Auto-ACK Example', - formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50, width=120)) - parser.add_argument( - '-c', '--channel', type=int, help='RF channel (0-125)', default=4) - parser.add_argument('-r', '--data_rate', type=float, - help='Data Rate (250e3, 1e6 or 2e6', default=2e6, choices=[250e3, 1e6, 2e6]) - parser.add_argument('-l', '--crc_length', type=int, - help='CRC Length (1-2)', default=2, choices=[1, 2]) - parser.add_argument('-a', '--address_length', type=int, - help='Address Length (3-5)', default=5, choices=[3, 4, 5]) - parser.add_argument('-s', '--samples_per_symbol', - type=int, help='Samples Per Symbol', default=2) - parser.add_argument( - '-g', '--gain', type=float, help='Radio Gain', default=80) - - args = parser.parse_args() - - tb = top_block(args) - tb.start() - try: - raw_input('Press Enter to quit: ') - except EOFError: - pass - tb.stop() - tb.wait() - - -if __name__ == '__main__': - main() diff --git a/examples/nordic_channelized_receiver.py b/examples/nordic_channelized_receiver.py deleted file mode 100755 index af9179d..0000000 --- a/examples/nordic_channelized_receiver.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python2 - -from gnuradio import gr, blocks, digital, filter -from gnuradio.filter import firdes -import thread -import nordic -import pmt -import struct -import time -import numpy -import array -import osmosdr -import argparse -from bitstring import BitArray -from gnuradio import uhd -from Queue import Queue - - -class top_block(gr.top_block): - - def __init__(self, args): - gr.top_block.__init__(self, "Nordic Single-Channel Receiver Example") - - self.freq = 2414e6 - self.gain = args.gain - self.symbol_rate = 2e6 - self.sample_rate = 4e6 - - # Channel map - channel_count = 3 - channel_map = [14, 18, 10] - - # Data rate index - dr = 2 # 2M - - # SDR source (gr-osmosdr source) - self.osmosdr_source = osmosdr.source() - self.osmosdr_source.set_sample_rate(self.sample_rate * channel_count) - self.osmosdr_source.set_center_freq(self.freq) - self.osmosdr_source.set_gain(self.gain) - self.osmosdr_source.set_antenna('TX/RX') - - # PFB channelizer - taps = firdes.low_pass_2( - 1, self.sample_rate, self.symbol_rate / 2, 100e3, 30) - self.channelizer = filter.pfb_channelizer_ccf(channel_count, taps, 1) - - # Stream to streams (PFB channelizer input) - self.s2ss = blocks.stream_to_streams( - gr.sizeof_gr_complex, channel_count) - self.connect(self.osmosdr_source, self.s2ss) - - # Demodulators and packet deframers - self.nordictap_printer = nordictap_printer() - self.demods = [] - self.rxs = [] - for x in range(channel_count): - self.connect((self.s2ss, x), (self.channelizer, x)) - self.demods.append(digital.gfsk_demod()) - self.rxs.append(nordic.nordic_rx(x, 5, 2, dr)) - self.connect((self.channelizer, x), self.demods[x]) - self.connect(self.demods[x], self.rxs[x]) - self.msg_connect( - self.rxs[x], "nordictap_out", self.nordictap_printer, "nordictap_in") - - -# Nordic Printer -class nordictap_printer(gr.sync_block): - - # Constructor - - def __init__(self): - gr.sync_block.__init__( - self, name="Nordictap Printer", in_sig=None, out_sig=None) - - # Received packet input port - self.message_port_register_in(pmt.intern("nordictap_in")) - self.set_msg_handler( - pmt.intern("nordictap_in"), self.nordictap_handler) - - # 
Handle incoming packets, and print payloads - def nordictap_handler(self, msg): - - # PMT to byte string - data = pmt.to_python(msg).tostring() - - # Unpack the header - values = struct.unpack('BBBBBBBB', data[0:8]) - channel = values[0] - data_rate = values[1] - address_length = values[2] - payload_length = values[3] - sequence_number = values[4] - no_ack = values[5] - crc_length = values[6] - - # Parse the address, payload, and crc - address = data[7:7 + address_length] - payload = data[7 + address_length:7 + address_length + payload_length] - crc = data[7 + address_length + payload_length: - 7 + address_length + payload_length + crc_length] - - # Print the channel, sequence number, address and payload - print 'CH=' + str(2400 + channel), - print 'SEQ=' + str(sequence_number), - print 'ADDR=' + ':'.join('%02X' % ord(b) for b in address), - print 'PLD=' + ':'.join('%02X' % ord(b) for b in payload), - print 'CRC=' + ':'.join('%02X' % ord(b) for b in crc) - - -def main(): - - # Parse command line arguments - parser = argparse.ArgumentParser('Nordic Channelized Receiver Example', - formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50, width=120)) - parser.add_argument( - '-g', '--gain', type=float, help='Radio Gain', default=30) - - args = parser.parse_args() - - tb = top_block(args) - tb.start() - try: - raw_input('Press Enter to quit: ') - except EOFError: - pass - tb.stop() - tb.wait() - - -if __name__ == '__main__': - main() diff --git a/examples/nordic_channelized_transmitter.py b/examples/nordic_channelized_transmitter.py deleted file mode 100755 index bdf5462..0000000 --- a/examples/nordic_channelized_transmitter.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python2 - -from gnuradio import gr, blocks, digital, filter -from gnuradio.filter import firdes -import thread -import nordic -import pmt -import struct -import time -import numpy -import array -import random -import osmosdr -import argparse -from bitstring import BitArray -from gnuradio import uhd -from Queue import Queue - - -class top_block(gr.top_block): - - def __init__(self, args): - gr.top_block.__init__(self, "Nordic Single-Channel Receiver Example") - - self.freq = 2414e6 - self.gain = args.gain - self.symbol_rate = 2e6 - self.sample_rate = 4e6 - - # Channel map - channel_count = 3 - channel_map = [14, 18, 10] - - # Data rate index - dr = 2 # 2M - - # SDR sink (gr-osmosdr sink) - self.osmosdr_sink = osmosdr.sink() - self.osmosdr_sink.set_sample_rate(self.sample_rate * channel_count) - self.osmosdr_sink.set_center_freq(self.freq) - self.osmosdr_sink.set_gain(self.gain) - self.osmosdr_sink.set_antenna('TX/RX') - - # PFB channelizer - taps = firdes.low_pass_2( - 1, self.sample_rate, self.symbol_rate / 2, 100e3, 30) - self.synthesizer = filter.pfb_synthesizer_ccf(channel_count, taps) - - # Modulators and packet framers - self.nordictap_transmitter = nordictap_transmitter(channel_map) - self.mods = [] - self.tx = nordic.nordic_tx(channel_count) - for x in range(channel_count): - self.mods.append(digital.gfsk_mod()) - self.connect((self.tx, x), self.mods[x]) - self.connect(self.mods[x], (self.synthesizer, x)) - self.connect(self.synthesizer, self.osmosdr_sink) - - # Wire up output packet connection - self.msg_connect(self.nordictap_transmitter, - "nordictap_out", self.tx, "nordictap_in") - - -# Nordic transmitter strobe -class nordictap_transmitter(gr.sync_block): - - # Constructor - - def __init__(self, channel_map): - gr.sync_block.__init__( - self, name="Nordictap Printer/Transmitter", 
in_sig=None, out_sig=None) - - self.channel_map = channel_map - - # Packet output port - self.message_port_register_out(pmt.intern("nordictap_out")) - - # Transmit a packet - def transmit(self, address, payload, channel_index, sequence_number): - - channel = self.channel_map[channel_index] - - # Build a payload - nordictap = [channel_index] + [ - channel, 2, len(address), len(payload), sequence_number, 0, 2] - for c in address: - nordictap.append(ord(c)) - for c in payload: - nordictap.append(ord(c)) - - # Transmit packet - vec = pmt.make_u8vector(len(nordictap), 0) - for x in range(len(nordictap)): - pmt.u8vector_set(vec, x, nordictap[x]) - self.message_port_pub(pmt.intern("nordictap_out"), vec) - - -def main(): - - # Parse command line arguments - parser = argparse.ArgumentParser('Nordic Channelized Transmitter Example', - formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50, width=120)) - parser.add_argument( - '-g', '--gain', type=float, help='Radio Gain', default=30) - - args = parser.parse_args() - - tb = top_block(args) - tb.start() - - # Transmit some packets, hopping between three channels - address = '\x11\x22\x11\x22\x11' - payload = '\x55\x44\x33\x22\x11' - sequence_number = 0 - while True: - for x in range(3): - tb.nordictap_transmitter.transmit( - address, payload, x, sequence_number) - sequence_number += 1 - if sequence_number > 3: - sequence_number = 0 - time.sleep(0.1) - - try: - raw_input('Press Enter to quit: ') - except EOFError: - pass - tb.stop() - tb.wait() - - -if __name__ == '__main__': - main() diff --git a/examples/nordic_receiver.py b/examples/nordic_receiver.py deleted file mode 100755 index e362e78..0000000 --- a/examples/nordic_receiver.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python2 - -from gnuradio import gr, blocks, digital, filter -from gnuradio.filter import firdes -import thread -import nordic -import pmt -import struct -import time -import numpy -import array -import osmosdr -import argparse -from bitstring import BitArray -from gnuradio import uhd -from Queue import Queue - - -class top_block(gr.top_block): - - def __init__(self, args): - gr.top_block.__init__(self, "Nordic Single-Channel Receiver Example") - - # SDR configuration - self.freq = 2400e6 + args.channel * 1e6 - self.gain = args.gain - self.symbol_rate = args.data_rate - self.sample_rate = args.data_rate * args.samples_per_symbol - - # SDR source (gr-osmosdr source)_tx_queue.push(msg); - self.osmosdr_source = osmosdr.source() - self.osmosdr_source.set_sample_rate(self.sample_rate) - self.osmosdr_source.set_center_freq(self.freq) - self.osmosdr_source.set_gain(self.gain) - self.osmosdr_source.set_antenna('TX/RX') - - # Receive chain - dr = 0 - if args.data_rate == 1e6: - dr = 1 - elif args.data_rate == 2e6: - dr = 2 - self.rx = nordic.nordic_rx( - args.channel, args.address_length, args.crc_length, dr) - self.gfsk_demod = digital.gfsk_demod( - samples_per_symbol=args.samples_per_symbol) - self.lpf = filter.fir_filter_ccf( - 1, firdes.low_pass_2(1, self.sample_rate, self.symbol_rate / 2, 50e3, 50)) - self.connect(self.osmosdr_source, self.lpf) - self.connect(self.lpf, self.gfsk_demod) - self.connect(self.gfsk_demod, self.rx) - - # Handle incoming packets - self.nordictap_printer = nordictap_printer() - self.msg_connect( - self.rx, "nordictap_out", self.nordictap_printer, "nordictap_in") - - -# Nordic Printer -class nordictap_printer(gr.sync_block): - - # Constructor - - def __init__(self): - gr.sync_block.__init__( - self, name="Nordictap Handler", 
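
Going the other way, the transmit() method above shows the message format that nordic_tx expects on its nordictap_in port: a channel index for the synthesizer, the seven header fields, then the raw address and payload octets, packed into a PMT u8vector. A small sketch of building such a message, assuming Python 3 bytes for the address and payload (the original scripts are Python 2 and use ord()):

    import pmt

    def build_nordictap_tx(channel_index, channel, address, payload,
                           sequence_number=0, data_rate=2, no_ack=0, crc_length=2):
        # Channel index, then the nordictap header fields, then address/payload.
        fields = [channel_index, channel, data_rate, len(address), len(payload),
                  sequence_number, no_ack, crc_length]
        fields += list(address) + list(payload)
        # Pack into a u8vector for the nordictap_in message port.
        vec = pmt.make_u8vector(len(fields), 0)
        for i, value in enumerate(fields):
            pmt.u8vector_set(vec, i, value)
        return vec

    # Example: build_nordictap_tx(0, 14, b'\x11\x22\x11\x22\x11', b'\x55\x44\x33\x22\x11')
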
in_sig=None, out_sig=None) - - # Received packet input port - self.message_port_register_in(pmt.intern("nordictap_in")) - self.set_msg_handler( - pmt.intern("nordictap_in"), self.nordictap_handler) - - # Handle incoming packets, and print payloads - def nordictap_handler(self, msg): - - # PMT to byte string - data = pmt.to_python(msg).tostring() - - # Unpack the header - values = struct.unpack('BBBBBBBB', data[0:8]) - channel = values[0] - data_rate = values[1] - address_length = values[2] - payload_length = values[3] - sequence_number = values[4] - no_ack = values[5] - crc_length = values[6] - - # Parse the address, payload, and crc - address = data[7:7 + address_length] - payload = data[7 + address_length:7 + address_length + payload_length] - crc = data[7 + address_length + payload_length: - 7 + address_length + payload_length + crc_length] - - # Print the channel, sequence number, address and payload - print 'CH=' + str(2400 + channel), - print 'SEQ=' + str(sequence_number), - print 'ADDR=' + ':'.join('%02X' % ord(b) for b in address), - print 'PLD=' + ':'.join('%02X' % ord(b) for b in payload), - print 'CRC=' + ':'.join('%02X' % ord(b) for b in crc) - - -def main(): - - # Parse command line arguments - parser = argparse.ArgumentParser('Nordic Single-Channel Receiver Example', - formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50, width=120)) - parser.add_argument( - '-c', '--channel', type=int, help='RF channel (0-125)', default=4) - parser.add_argument('-r', '--data_rate', type=float, - help='Data Rate (250e3, 1e6 or 2e6', default=2e6, choices=[250e3, 1e6, 2e6]) - parser.add_argument('-l', '--crc_length', type=int, - help='CRC Length (1-2)', default=2, choices=[1, 2]) - parser.add_argument('-a', '--address_length', type=int, - help='Address Length (3-5)', default=5, choices=[3, 4, 5]) - parser.add_argument('-s', '--samples_per_symbol', - type=int, help='Samples Per Symbol', default=2) - parser.add_argument( - '-g', '--gain', type=float, help='Radio Gain', default=80) - - args = parser.parse_args() - - tb = top_block(args) - tb.start() - try: - raw_input('Press Enter to quit: ') - except EOFError: - pass - tb.stop() - tb.wait() - - -if __name__ == '__main__': - main() diff --git a/examples/nordic_sniffer_scanner.py b/examples/nordic_sniffer_scanner.py deleted file mode 100755 index 897e57b..0000000 --- a/examples/nordic_sniffer_scanner.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python2 - -from gnuradio import gr, blocks, digital, filter -from gnuradio.filter import firdes -import thread -import osmosdr -import nordic -import pmt -import struct -import time - - -class top_block(gr.top_block): - - def __init__(self): - gr.top_block.__init__(self, "AirHogs Sync Framer Example") - - # SDR configuration - self.freq = 2402e6 - self.gain = 70 - self.symbol_rate = 2e6 - self.sample_rate = 4e6 - - # SDR source (gr-osmosdr source) - self.osmosdr_source = osmosdr.source() - self.osmosdr_source.set_sample_rate(self.sample_rate) - self.osmosdr_source.set_center_freq(self.freq) - self.osmosdr_source.set_gain(self.gain) - self.osmosdr_source.set_antenna('TX/RX') - - # Low pass filter - self.lpf = filter.fir_filter_ccf( - 1, firdes.low_pass_2(1, self.sample_rate, self.symbol_rate / 2, 50e3, 50)) - - # GFSK demod, defaults to 2 samples per symbol - self.gfsk_demod = digital.gfsk_demod() - - # Nordic RX - self.nordic_rx = nordic.nordic_rx(3, 5, 2, 2) - - # Connect the blocks - self.connect((self.osmosdr_source, 0), (self.lpf, 0)) - self.connect((self.lpf, 0), 
(self.gfsk_demod, 0)) - self.connect((self.gfsk_demod, 0), (self.nordic_rx, 0)) - - # Handle incoming packets - self.nordictap_handler = microsoft_nordictap_handler(self) - self.msg_connect( - self.nordic_rx, "nordictap_out", self.nordictap_handler, "nordictap_in") - - # Tune the USRP by nRF24L channel number - def set_channel(self, channel): - - new_channel = 2400e6 + channel * 1e6 - self.osmosdr_source.set_center_freq(2400e6 + channel * 1e6) - self.nordic_rx.set_channel(channel) - - -# Microsoft mouse nordictap handler -class microsoft_nordictap_handler(gr.sync_block): - - def __init__(self, tb): - gr.sync_block.__init__( - self, name="Nordictap Handler", in_sig=None, out_sig=None) - - self.tb = tb - self.message_port_register_in(pmt.intern("nordictap_in")) - self.set_msg_handler( - pmt.intern("nordictap_in"), self.nordictap_handler) - - # Tick / channel hopping state and logic - self.last_rx = time.time() - self.last_tune = time.time() - self.ch_timeout = 0.4 # timeout a channel after 200ms - self.last_ch = 0 - thread.start_new_thread(self.tick, ()) - - # Channels and channel groups - self.channels = range(2, 84) - - # 10ms tick - def tick(self): - - while True: - - # Check for a stale channel - if ((time.time() - self.last_rx) > self.ch_timeout * 5) and \ - ((time.time() - self.last_tune) > self.ch_timeout): - - self.last_ch += 1 - if self.last_ch >= len(self.channels): - self.last_ch = 0 - print 'Tuning to 24%02i MHz' % self.channels[self.last_ch] - self.last_tune = time.time() - self.tb.set_channel(self.channels[self.last_ch]) - - # Wait 10ms - time.sleep(0.01) - - def nordictap_handler(self, msg): - - data = pmt.to_python(msg).tostring() - - # Unpack the header - values = struct.unpack('BBBBBBB', data[0:7]) - channel = values[0] - data_rate = values[1] - address_length = values[2] - payload_length = values[3] - sequence_number = values[4] - no_ack = values[5] - crc_length = values[6] - - # Parse the address, payload, and crc - address = data[7:7 + address_length] - payload = data[7 + address_length:7 + address_length + payload_length] - crc = data[7 + address_length + payload_length: - 7 + address_length + payload_length + crc_length] - - self.last_rx = time.time() - - # Print the channel, sequence number, address and payload - print 'CH=' + str(2400 + channel), - print 'SEQ=' + str(sequence_number), - print 'ADDR=' + ':'.join('%02X' % ord(b) for b in address), - print 'PLD=' + ':'.join('%02X' % ord(b) for b in payload), - print 'CRC=' + ':'.join('%02X' % ord(b) for b in crc) - - -def main(): - tb = top_block() - tb.start() - try: - raw_input('Press Enter to quit: ') - except EOFError: - pass - tb.stop() - tb.wait() - - -if __name__ == '__main__': - main() diff --git a/grc/CMakeLists.txt b/grc/CMakeLists.txt deleted file mode 100644 index b456b1a..0000000 --- a/grc/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-install(FILES - nordic_nordic_rx.xml - nordic_nordic_tx.xml DESTINATION share/gnuradio/grc/blocks -) diff --git a/grc/nordic_nordic_rx.xml b/grc/nordic_nordic_rx.xml deleted file mode 100644 index d7f3b3e..0000000 --- a/grc/nordic_nordic_rx.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - nordic_rx - nordic_nordic_rx - nordic - import nordic - nordic.nordic_rx() - - - ... - ... - ... - - - - - in - - - - - - out - - - diff --git a/grc/nordic_nordic_tx.xml b/grc/nordic_nordic_tx.xml deleted file mode 100644 index ef783c0..0000000 --- a/grc/nordic_nordic_tx.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - nordic_tx - nordic_nordic_tx - nordic - import nordic - nordic.nordic_tx() - - - ... - ... - ... - - - - - in - - - - - - out - - - diff --git a/include/nordic/CMakeLists.txt b/include/nordic/CMakeLists.txt deleted file mode 100644 index 46f73f9..0000000 --- a/include/nordic/CMakeLists.txt +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -######################################################################## -# Install public header files -######################################################################## -install(FILES - api.h - nordic_rx.h - nordic_tx.h DESTINATION include/nordic -) - diff --git a/include/nordic/api.h b/include/nordic/api.h deleted file mode 100644 index a3d963e..0000000 --- a/include/nordic/api.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -*/ - -#ifndef INCLUDED_NORDIC_API_H -#define INCLUDED_NORDIC_API_H - -#include - -#ifdef gnuradio_nordic_EXPORTS -# define NORDIC_API __GR_ATTR_EXPORT -#else -# define NORDIC_API __GR_ATTR_IMPORT -#endif - -#endif /* INCLUDED_NORDIC_API_H */ diff --git a/include/nordic/nordic_rx.h b/include/nordic/nordic_rx.h deleted file mode 100644 index a105a38..0000000 --- a/include/nordic/nordic_rx.h +++ /dev/null @@ -1,63 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. 
- * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - - -#ifndef INCLUDED_NORDIC_NORDIC_RX_H -#define INCLUDED_NORDIC_NORDIC_RX_H - -#include -#include - -namespace gr { - namespace nordic { - - /*! - * \brief <+description of block+> - * \ingroup nordic - * - */ - class NORDIC_API nordic_rx : virtual public gr::sync_block - { - public: - typedef boost::shared_ptr sptr; - - /*! - * \brief Return a shared_ptr to a new instance of nordic::nordic_rx. - * - * To avoid accidental use of raw pointers, nordic::nordic_rx's - * constructor is in a private implementation - * class. nordic::nordic_rx::make is the public interface for - * creating new instances. - */ - static sptr make(uint8_t channel=0, - uint8_t address_length=5, - uint8_t crc_length=2, - uint8_t data_rate=0); - - // Channel getter/setter - virtual uint8_t get_channel()=0; - virtual void set_channel(uint8_t channel)=0; - }; - - } // namespace nordic -} // namespace gr - -#endif /* INCLUDED_NORDIC_NORDIC_RX_H */ - diff --git a/include/nordic/nordic_tx.h b/include/nordic/nordic_tx.h deleted file mode 100644 index c35291c..0000000 --- a/include/nordic/nordic_tx.h +++ /dev/null @@ -1,56 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - - -#ifndef INCLUDED_NORDIC_NORDIC_TX_H -#define INCLUDED_NORDIC_NORDIC_TX_H - -#include -#include - -namespace gr { - namespace nordic { - - /*! - * \brief <+description of block+> - * \ingroup nordic - * - */ - class NORDIC_API nordic_tx : virtual public gr::sync_block - { - public: - typedef boost::shared_ptr sptr; - - /*! - * \brief Return a shared_ptr to a new instance of nordic::nordic_tx. - * - * To avoid accidental use of raw pointers, nordic::nordic_tx's - * constructor is in a private implementation - * class. nordic::nordic_tx::make is the public interface for - * creating new instances. 
- */ - static sptr make(uint8_t channel_count=1); - }; - - } // namespace nordic -} // namespace gr - -#endif /* INCLUDED_NORDIC_NORDIC_TX_H */ - diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt deleted file mode 100644 index d3a800a..0000000 --- a/lib/CMakeLists.txt +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -######################################################################## -# Setup library -######################################################################## -include(GrPlatform) #define LIB_SUFFIX - -include_directories(${Boost_INCLUDE_DIR}) -link_directories(${Boost_LIBRARY_DIRS}) -list(APPEND nordic_sources - nordic_rx_impl.cc - bit_shifting_byte_vector.cc - nordic_tx_impl.cc - enhanced_shockburst_packet.cc ) - -set(nordic_sources "${nordic_sources}" PARENT_SCOPE) -if(NOT nordic_sources) - MESSAGE(STATUS "No C++ sources... skipping lib/") - return() -endif(NOT nordic_sources) - -add_library(gnuradio-nordic SHARED ${nordic_sources}) -target_link_libraries(gnuradio-nordic ${Boost_LIBRARIES} ${GNURADIO_ALL_LIBRARIES}) -set_target_properties(gnuradio-nordic PROPERTIES DEFINE_SYMBOL "gnuradio_nordic_EXPORTS") - -if(APPLE) - set_target_properties(gnuradio-nordic PROPERTIES - INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib" - ) -endif(APPLE) - -######################################################################## -# Install built library files -######################################################################## -install(TARGETS gnuradio-nordic - LIBRARY DESTINATION lib${LIB_SUFFIX} # .so/.dylib file - ARCHIVE DESTINATION lib${LIB_SUFFIX} # .lib file - RUNTIME DESTINATION bin # .dll file -) - -######################################################################## -# Build and register unit test -######################################################################## -include(GrTest) - -include_directories(${CPPUNIT_INCLUDE_DIRS}) - -list(APPEND test_nordic_sources - ${CMAKE_CURRENT_SOURCE_DIR}/test_nordic.cc - ${CMAKE_CURRENT_SOURCE_DIR}/qa_nordic.cc -) - -add_executable(test-nordic ${test_nordic_sources}) - -target_link_libraries( - test-nordic - ${GNURADIO_RUNTIME_LIBRARIES} - ${Boost_LIBRARIES} - ${CPPUNIT_LIBRARIES} - gnuradio-nordic -) - -GR_ADD_TEST(test_nordic test-nordic) diff --git a/lib/bit_shifting_byte_vector.cc b/lib/bit_shifting_byte_vector.cc deleted file mode 100644 index 180b6fe..0000000 --- a/lib/bit_shifting_byte_vector.cc +++ /dev/null @@ -1,79 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. 
- * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - - -#include "bit_shifting_byte_vector.h" - -#include - -// Constructor -bit_shifting_byte_vector::bit_shifting_byte_vector(int length) : - m_length(length), - m_previous_state_write(7), - m_previous_state_read(0) -{ - m_bytes = new unsigned char[m_length]; - for(int x = 0; x < 8; x++) - m_previous_states[x] = new unsigned char[m_length]; -} - -// Copy constructor -bit_shifting_byte_vector::bit_shifting_byte_vector(const bit_shifting_byte_vector ©) -{ - m_length = copy.m_length; - m_bytes = new unsigned char[m_length]; - memcpy(m_bytes, copy.m_bytes, m_length); - for(int x = 0; x < 8; x++) - { - m_previous_states[x] = new unsigned char[m_length]; - memcpy(m_previous_states[x], copy.m_previous_states[x], m_length); - } - m_previous_state_write = copy.m_previous_state_write; - m_previous_state_read = copy.m_previous_state_read; -} - -// Destructor -bit_shifting_byte_vector::~bit_shifting_byte_vector() -{ - delete[] m_bytes; - for(int x = 0; x < 8; x++) - { - delete[] m_previous_states[x]; - } -} - -// Add a new bit -void bit_shifting_byte_vector::add_bit(unsigned char bit) -{ - // Update the previous state vector - memcpy(m_previous_states[m_previous_state_write], m_bytes, m_length); - - // For the left m_length-1 bytes, we'll - // use values from m_previous_states - memcpy(m_bytes, m_previous_states[m_previous_state_read]+1, m_length - 1); - - // For the rightmost byte, we'll tack on - // the the current new bit - m_bytes[m_length - 1] = m_bytes[m_length - 1] << 1 | bit; - - // Update the previous state indices - if(++m_previous_state_read > 7) m_previous_state_read = 0; - if(++m_previous_state_write > 7) m_previous_state_write = 0; -} \ No newline at end of file diff --git a/lib/bit_shifting_byte_vector.h b/lib/bit_shifting_byte_vector.h deleted file mode 100644 index d8f2c5f..0000000 --- a/lib/bit_shifting_byte_vector.h +++ /dev/null @@ -1,72 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. 
- */ - - -#ifndef BIT_SHIFTING_BYTE_VECTOR_H -#define BIT_SHIFTING_BYTE_VECTOR_H - -// Byte vector that can be added to one bit at a time -class bit_shifting_byte_vector -{ -public: - - // Constructor - bit_shifting_byte_vector(int length); - - // Copy constructor - bit_shifting_byte_vector(const bit_shifting_byte_vector ©); - - // Destructor - ~bit_shifting_byte_vector(); - - // Add a new bit - void add_bit(unsigned char bit); - - // Get the byte array - const unsigned char * bytes() { return m_bytes; } - - // Get a previous byte array state - const unsigned char * bytes(int index) - { - index += m_previous_state_read; - while(index < 0) index += 7; - while(index > 7) index -= 7; - return m_previous_states[index]; - } - -private: - - // Bytes - current state - unsigned char * m_bytes; - - // Length, in bytes - int m_length; - - // Bytes - previous 8 states - unsigned char * m_previous_states[8]; - - // Previous state index - write - int m_previous_state_write; - - // Previous state index - read - int m_previous_state_read; -}; - -#endif // BIT_SHIFTING_BYTE_VECTOR_H diff --git a/lib/enhanced_shockburst_packet.cc b/lib/enhanced_shockburst_packet.cc deleted file mode 100644 index 1f3efe7..0000000 --- a/lib/enhanced_shockburst_packet.cc +++ /dev/null @@ -1,178 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. 
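
bit_shifting_byte_vector above is a fixed-length byte window that the demodulated bit stream is shifted into one bit at a time, with the previous eight window states kept so that fields which are not byte-aligned (such as the 9-bit packet control field) can be read back. The following is only a loose conceptual sketch of that idea in Python, not a port of the class:

    from collections import deque

    class BitWindow(object):
        """Sliding window of the most recent length_bytes * 8 demodulated bits."""

        def __init__(self, length_bytes):
            self.nbits = length_bytes * 8
            self.bits = deque([0] * self.nbits, maxlen=self.nbits)

        def add_bit(self, bit):
            # The oldest bit falls off the left as the new bit is appended.
            self.bits.append(bit & 1)

        def as_bytes(self, bit_offset=0):
            # Pack the window into bytes, optionally skipping bit_offset bits,
            # which gives a bit-shifted view of the same captured packet.
            bits = list(self.bits)[bit_offset:]
            out = bytearray()
            for i in range(0, len(bits) - 7, 8):
                value = 0
                for b in bits[i:i + 8]:
                    value = (value << 1) | b
                out.append(value)
            return bytes(out)
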
- */ - - -#include -#include -#include - -#include "enhanced_shockburst_packet.h" - -enhanced_shockburst_packet::enhanced_shockburst_packet(uint8_t address_length, - uint8_t payload_length, - uint8_t sequence_number, - uint8_t no_ack, - uint8_t crc_length, - uint8_t * address, - uint8_t * payload - ) : - m_address_length(address_length), - m_payload_length(payload_length), - m_sequence_number(sequence_number), - m_no_ack(no_ack), - m_crc_length(crc_length) -{ - // Allocate buffers - m_address = new uint8_t[m_address_length]; - m_payload = new uint8_t[m_payload_length]; - m_crc = new uint8_t[m_crc_length]; - - // Copy over address and payload - memcpy(m_address, address, m_address_length); - memcpy(m_payload, payload, m_payload_length); - - // Build the packet bytes - const int blen = 3 /* preamble + PCF */ + - m_crc_length + - m_address_length + - m_payload_length; - m_packet_length_bytes = blen; - m_packet_length_bits = blen*8; - m_packet_bytes = new uint8_t[blen]; - - // Preamble - if((address[0] & 0x80) == 0x80) m_packet_bytes[0] = 0xAA; - else m_packet_bytes[0] = 0x55; - - // Address - memcpy(&m_packet_bytes[1], address, m_address_length); - - // PCF - m_packet_bytes[1 + m_address_length] = (m_payload_length & 0x3F) << 2; - m_packet_bytes[1 + m_address_length] |= (m_sequence_number & 0x3); - m_packet_bytes[2 + m_address_length] = m_no_ack << 7; - - // Payload - for(int b = 0; b < m_payload_length; b++) - { - m_packet_bytes[2 + m_address_length + b] |= (payload[b] >> 1); - m_packet_bytes[3 + m_address_length + b] |= (payload[b] << 7); - } - - // Calculate the CRC - uint16_t crc = 0xFFFF; - for(int b = 1; b < 7 + m_payload_length; b++) crc = crc_update(crc, m_packet_bytes[b]); - crc = crc_update(crc, m_packet_bytes[7 + m_payload_length] & 0x80, 1); - memcpy(m_crc, &crc, m_crc_length); - m_packet_bytes[2 + m_address_length + m_payload_length] |= ((crc >> 9) & 0xFF); - m_packet_bytes[3 + m_address_length + m_payload_length] |= ((crc >> 1) & 0xFF); - m_packet_bytes[4 + m_address_length + m_payload_length] |= ((crc << 7) & 0x80); -} - -// Destructur -enhanced_shockburst_packet::~enhanced_shockburst_packet() -{ - delete[] m_address; - delete[] m_payload; - delete[] m_crc; -} - -// Attempt to parse a packet from some incoming bytes -bool enhanced_shockburst_packet::try_parse(const uint8_t * bytes, - const uint8_t * bytes_shifted, - uint8_t address_length, - uint8_t crc_length, - enhanced_shockburst_packet *& packet) -{ - // Read the payload length - uint8_t payload_length = bytes[6] >> 2; - if(payload_length > 32) return false; - - // Read the address - uint8_t * address = new uint8_t[address_length]; - memcpy(address, &bytes[1], address_length); - - // Read the given CRC - uint16_t crc_given; - memcpy(&crc_given, &bytes_shifted[8 + payload_length], 2); - - // Calculate the CRC - uint16_t crc = 0xFFFF; - for(int b = 1; b < 7 + payload_length; b++) crc = crc_update(crc, bytes[b]); - crc = crc_update(crc, bytes[7 + payload_length] & 0x80, 1); - crc = htons(crc); - - // Validate the CRC - if(memcmp(&crc, &crc_given, 2) != 0) - { - delete[] address; - return false; - } - - // Read the sequence number and no-ACK bit - uint8_t seq = bytes[6] & 0x3; - uint8_t no_ack = bytes[7] >> 7; - - // Read the payload - uint8_t payload[32]; - memcpy(payload, &bytes_shifted[8], payload_length); - - // Update the fields - packet = new enhanced_shockburst_packet(address_length, payload_length, seq, no_ack, crc_length, address, payload); - - // Cleanup - delete[] address; - - return true; -} - -// Print the packet 
details to standard out -void enhanced_shockburst_packet::print() -{ - printf("Address: "); - for(int x = 0; x < m_address_length; x++) printf("%02X ", m_address[x]); - printf("\n"); - - printf("Payload: "); - for(int x = 0; x < m_payload_length; x++) printf("%02X ", m_payload[x]); - printf("\n"); - - printf("CRC: "); - for(int x = 0; x < m_crc_length; x++) printf("%02X ", m_crc[x]); - printf("\n"); - - printf("Bytes: "); - for(int x = 0; x < m_packet_length_bytes; x++) printf("%02X ", m_packet_bytes[x]); - printf("\n"); - - printf("\n"); -} - -// Process a crc byte (or partial byte) -uint16_t enhanced_shockburst_packet::crc_update (uint16_t crc, uint8_t data, uint8_t bits) -{ - crc = crc ^ ((uint16_t)data << 8); - for (int x = 0; x < bits; x++) - { - if(crc & 0x8000) crc = (crc << 1) ^ 0x1021; - else crc <<= 1; - } - return crc; -} \ No newline at end of file diff --git a/lib/enhanced_shockburst_packet.h b/lib/enhanced_shockburst_packet.h deleted file mode 100644 index 3db60fc..0000000 --- a/lib/enhanced_shockburst_packet.h +++ /dev/null @@ -1,102 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. 
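
The constructor and try_parse above together describe the Enhanced ShockBurst on-air frame: a one-byte preamble (0xAA or 0x55 depending on the first address bit), a 3 to 5 byte address, a 9-bit packet control field (6-bit payload length, 2-bit sequence number, no-ACK bit), the payload offset by one bit, and a 1 or 2 byte CRC with polynomial 0x1021 and initial value 0xFFFF computed over the address, packet control field and payload. A small Python sketch of the CRC step only, mirroring crc_update and assuming the 5-byte-address case shown:

    def crc_update(crc, data, bits=8):
        # Same polynomial and shift direction as crc_update above; the masking
        # stands in for the implicit uint16_t truncation of the C++ version.
        crc ^= (data & 0xFF) << 8
        for _ in range(bits):
            if crc & 0x8000:
                crc = ((crc << 1) ^ 0x1021) & 0xFFFF
            else:
                crc = (crc << 1) & 0xFFFF
        return crc

    def esb_crc(address, pcf_and_payload, final_byte):
        # CRC over the address and the PCF + payload bytes, plus the single
        # leftover bit of the bit-shifted payload, as in try_parse above.
        crc = 0xFFFF
        for b in bytes(address) + bytes(pcf_and_payload):
            crc = crc_update(crc, b)
        return crc_update(crc, final_byte & 0x80, 1)
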
- */ - - -#ifndef ENHANCED_SHOCKBURST_PACKET_H -#define ENHANCED_SHOCKBURST_PACKET_H - -#include - -class enhanced_shockburst_packet -{ -public: - - // Constructor - enhanced_shockburst_packet(uint8_t address_length, - uint8_t payload_length, - uint8_t sequence_number, - uint8_t no_ack, - uint8_t crc_length, - uint8_t * address, - uint8_t * payload - ); - - // Destructur - ~enhanced_shockburst_packet(); - - // Attempt to parse a packet from some incoming bytes - static bool try_parse(const uint8_t * bytes, - const uint8_t * bytes_shifted, - uint8_t address_length, - uint8_t crc_length, - enhanced_shockburst_packet *& packet); - - // Process a crc byte (or partial byte) - static uint16_t crc_update (uint16_t crc, uint8_t data, uint8_t bits=8); - - // Print the packet details to standard out - void print(); - - // Getters - const uint8_t payload_length() { return m_payload_length; } - const uint8_t bytes_length() { return m_packet_length_bytes; } - const uint8_t sequence_number() { return m_sequence_number; } - const uint8_t no_ack() { return m_no_ack; } - const uint8_t * address() { return m_address; } - const uint8_t * payload() { return m_payload; } - const uint8_t * crc() { return m_crc; } - const uint8_t * bytes() { return m_packet_bytes; } - -private: - - // Address length - uint8_t m_address_length; - - // Payload length - uint8_t m_payload_length; - - // Packet length - uint8_t m_packet_length_bytes; - uint16_t m_packet_length_bits; - - // Sequence number (ESB) - uint8_t m_sequence_number; - - // No ACK bit (ESB) - bool m_no_ack; - - // CRC length - uint8_t m_crc_length; - - // Address - uint8_t * m_address; - - // Payload - uint8_t * m_payload; - - // CRC - uint8_t * m_crc; - - // Assembled packet bytes - uint8_t * m_packet_bytes; - -} __attribute__((packed)); - -#endif // ENHANCED_SHOCKBURST_PACKET_H \ No newline at end of file diff --git a/lib/nordic_rx_impl.cc b/lib/nordic_rx_impl.cc deleted file mode 100644 index 2cb45c5..0000000 --- a/lib/nordic_rx_impl.cc +++ /dev/null @@ -1,152 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks. - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. 
- */ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include -#include "nordic_rx_impl.h" -#include "nordictap.h" - -namespace gr { - namespace nordic { - - nordic_rx::sptr - nordic_rx::make(uint8_t channel, - uint8_t address_length, - uint8_t crc_length, - uint8_t data_rate) - { - return gnuradio::get_initial_sptr - (new nordic_rx_impl(channel, address_length, crc_length, data_rate)); - } - - /* - * The private constructor - */ - nordic_rx_impl::nordic_rx_impl(uint8_t channel, - uint8_t address_length, - uint8_t crc_length, - uint8_t data_rate) - : gr::sync_block("nordic_rx", - gr::io_signature::make(1, 1, sizeof(uint8_t)), - gr::io_signature::make(0, 0, 0)), - m_decoded_bits_bytes(42*8 /* buffer sufficient for max ESB frame length */), - m_crc_length(crc_length), - m_address_length(address_length), - m_channel(channel), - m_data_rate(data_rate) - { - message_port_register_out(pmt::mp("nordictap_out")); - } - - /* - * Our virtual destructor. - */ - nordic_rx_impl::~nordic_rx_impl() - { - - } - - int - nordic_rx_impl::work(int noutput_items, - gr_vector_const_void_star &input_items, - gr_vector_void_star &output_items) - { - const uint8_t *in = (const uint8_t *) input_items[0]; - - // Step through the incoming demodulated bits - for(int x = 0; x < noutput_items; x++) - { - // Add the incoming bit to the bit shifted byte array - m_decoded_bits_bytes.add_bit(in[x]); - const uint8_t * bytes = m_decoded_bits_bytes.bytes(); - - // Check for a valid preamble - if(bytes[0] == 0xAA || bytes[0] == 0x55) - { - // Check for a valid first address bit - if((bytes[0] & 0x80) == (bytes[1] & 0x80)) - { - // Attempt to decode a payload - if(enhanced_shockburst_packet::try_parse(bytes, - m_decoded_bits_bytes.bytes(0), - m_address_length, - m_crc_length, - m_enhanced_shockburst)) - { - // Build the wireshark header - nordictap_header header; - header.channel = m_channel; - header.data_rate = m_data_rate; - header.address_length = m_address_length; - header.payload_length = m_enhanced_shockburst->payload_length(); - header.sequence_number = m_enhanced_shockburst->sequence_number(); - header.no_ack = m_enhanced_shockburst->no_ack(); - header.crc_length = m_crc_length; - - // Concatenate the header, address, payload, and CRC - uint8_t buffer_length = sizeof(nordictap_header) + m_address_length + header.payload_length + m_crc_length; - uint8_t * buffer = new uint8_t[buffer_length]; - memcpy(&buffer[0], &header, sizeof(nordictap_header)); - memcpy(&buffer[sizeof(nordictap_header)], m_enhanced_shockburst->address(), m_address_length); - memcpy(&buffer[sizeof(nordictap_header) + m_address_length], m_enhanced_shockburst->payload(), header.payload_length); - memcpy(&buffer[sizeof(nordictap_header) + m_address_length + header.payload_length], m_enhanced_shockburst->crc(), m_crc_length); - - // Send the packet to wireshark - boost::asio::io_service io_service; - boost::asio::ip::udp::resolver resolver(io_service); - boost::asio::ip::udp::resolver::query query(boost::asio::ip::udp::v4(), "127.0.0.1", "9451"); - boost::asio::ip::udp::endpoint receiver_endpoint = *resolver.resolve(query); - boost::asio::ip::udp::socket socket(io_service); - socket.open(boost::asio::ip::udp::v4()); - socket.send_to(boost::asio::buffer(buffer, buffer_length), receiver_endpoint); - - // Send the packet to nordictap_out - message_port_pub(pmt::intern("nordictap_out"), pmt::init_u8vector(buffer_length, buffer)); - - // Cleanup - delete[] buffer; - } - } - } - } - - return noutput_items; - } - - // Channel getter - 
uint8_t nordic_rx_impl::get_channel() - { - return m_channel; - } - - // Channel setter - void nordic_rx_impl::set_channel(uint8_t channel) - { - m_channel = channel; - } - - } /* namespace nordic */ -} /* namespace gr */ - diff --git a/lib/nordic_rx_impl.h b/lib/nordic_rx_impl.h deleted file mode 100644 index adcf668..0000000 --- a/lib/nordic_rx_impl.h +++ /dev/null @@ -1,74 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks. - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - -#ifndef INCLUDED_NORDIC_NORDIC_RX_IMPL_H -#define INCLUDED_NORDIC_NORDIC_RX_IMPL_H - -#include -#include -#include "bit_shifting_byte_vector.h" -#include "enhanced_shockburst_packet.h" - -namespace gr { - namespace nordic { - - class nordic_rx_impl : public nordic_rx - { - private: - - // CRC16-CCITT - boost::crc_optimal<16, 0x1021, 0, 0, false, false> m_crc; - - // Configuration - uint8_t m_address_length; - uint8_t m_crc_length; - uint8_t m_channel; - uint8_t m_data_rate; - - // Incoming bit/byte vector - bit_shifting_byte_vector m_decoded_bits_bytes; - - // Enhanced shockburst packet - enhanced_shockburst_packet * m_enhanced_shockburst; - - public: - - // Constructor/destructor - nordic_rx_impl(uint8_t channel, - uint8_t address_length, - uint8_t crc_length, - uint8_t data_rate); - ~nordic_rx_impl(); - - // Main work method - int work(int noutput_items, - gr_vector_const_void_star &input_items, - gr_vector_void_star &output_items); - - // Channel getter/setter - uint8_t get_channel(); - void set_channel(uint8_t channel); - }; - - } // namespace nordic -} // namespace gr - -#endif /* INCLUDED_NORDIC_NORDIC_RX_IMPL_H */ - diff --git a/lib/nordic_tx_impl.cc b/lib/nordic_tx_impl.cc deleted file mode 100644 index 019f39e..0000000 --- a/lib/nordic_tx_impl.cc +++ /dev/null @@ -1,155 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks. - * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. 
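
The receive path in nordic_rx_impl.cc above shifts each demodulated bit into a sliding byte window and only attempts a full Enhanced ShockBurst parse once the window starts with a plausible preamble (0xAA or 0x55) whose most significant bit matches the first address bit. A minimal Python sketch of that gating check, assuming a window of already-assembled bytes; the function name and the sample address bytes are illustrative and not part of the module:

    def looks_like_esb_start(window):
        """Cheap pre-filter applied before the full packet parse in work()."""
        preamble_ok = window[0] in (0xAA, 0x55)
        # The MSB of the preamble byte must agree with the first address bit
        # (the MSB of the following byte), mirroring the C++ check above.
        address_bit_ok = (window[0] & 0x80) == (window[1] & 0x80)
        return preamble_ok and address_bit_ok

    print(looks_like_esb_start(bytes([0xAA, 0xE7, 0xE7, 0xE7])))  # True for an 0xE7.. address
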
- */ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include -#include "nordic_tx_impl.h" -#include "nordictap.h" -#include "enhanced_shockburst_packet.h" - -namespace gr { - namespace nordic { - - nordic_tx::sptr - nordic_tx::make(uint8_t channel_count) - { - return gnuradio::get_initial_sptr - (new nordic_tx_impl(channel_count)); - } - - /* - * The private constructor - */ - nordic_tx_impl::nordic_tx_impl(uint8_t channel_count) - : gr::sync_block("nordic_tx", - gr::io_signature::make(0, 0, 0), - gr::io_signature::make(1, channel_count, sizeof(uint8_t))), - m_channel_count(channel_count) - { - // Register nordictap input, which accepts packets to transmit - message_port_register_in(pmt::intern("nordictap_in")); - set_msg_handler(pmt::intern("nordictap_in"), boost::bind(&nordic_tx_impl::nordictap_message_handler, this, _1)); - } - - /* - * Our virtual destructor. - */ - nordic_tx_impl::~nordic_tx_impl() - { - } - - // Incoming message handler - void nordic_tx_impl::nordictap_message_handler(pmt::pmt_t msg) - { - m_tx_queue.push(msg); - } - - int - nordic_tx_impl::work(int noutput_items, - gr_vector_const_void_star &input_items, - gr_vector_void_star &output_items) - { - // uint8_t *out = (uint8_t *) output_items[0]; - - // Check for new input - if(m_tx_queue.size()) - { - // Get the blob - std::vector vec = pmt::u8vector_elements(m_tx_queue.front()); - uint8_t * blob = vec.data(); - - // Read the channel index - uint8_t channel = blob[0]; - - // Read the nordictap header, address, and payload - nordictap_header header; - memcpy(&header, &blob[1], sizeof(nordictap_header)); - - // Read the address and payload - const uint8_t alen = header.address_length; - const uint8_t plen = header.payload_length; - uint8_t * address = new uint8_t[alen]; - uint8_t * payload = new uint8_t[plen]; - memcpy(address, &blob[sizeof(nordictap_header)+1], alen); - memcpy(payload, &blob[sizeof(nordictap_header)+1 + alen], plen); - - // Build the packet - enhanced_shockburst_packet * packet = - new enhanced_shockburst_packet(header.address_length, - header.payload_length, - header.sequence_number, - header.no_ack, - header.crc_length, - address, - payload); - - // Remove the blob from the queue - m_tx_queue.pop(); - - // Write the output bytes - uint8_t * out = (uint8_t *)output_items[channel]; - for(int b = 0; b < packet->bytes_length(); b++) - { - out[b] = packet->bytes()[b]; - out[packet->bytes_length()*2+b] = packet->bytes()[b]; - } - - // Write zeros to the other channels' buffers - for(int c = 0; c < m_channel_count; c++) - { - if(c != channel) - { - memset(output_items[c], 0, packet->bytes_length()*2); - } - } - - // Cleanup - delete[] address; - delete[] payload; - delete packet; - - // Return the number of bytes produced - return packet->bytes_length()*2; - } - else - { - return 0; - } - } - - // Process a crc byte (or partial byte) - uint16_t nordic_tx_impl::crc_update (uint16_t crc, uint8_t data, uint8_t bits) - { - crc = crc ^ ((uint16_t)data << 8); - for (int x = 0; x < bits; x++) - { - if(crc & 0x8000) crc = (crc << 1) ^ 0x1021; - else crc <<= 1; - } - return crc; - } - - } /* namespace nordic */ -} /* namespace gr */ - diff --git a/lib/nordic_tx_impl.h b/lib/nordic_tx_impl.h deleted file mode 100644 index 3c401e7..0000000 --- a/lib/nordic_tx_impl.h +++ /dev/null @@ -1,63 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2016 Bastille Networks. 
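
crc_update() above is a plain bitwise CRC-16/CCITT step (polynomial 0x1021, MSB first), the same polynomial declared for the receive block's boost::crc_optimal member. A short Python sketch of the identical update, masked to 16 bits because Python integers do not wrap; the names and the sample seed and data are illustrative only:

    def crc_update(crc, data, bits=8):
        """Shift `bits` bits of `data` (MSB first) into a CCITT CRC-16, as in nordic_tx_impl.cc."""
        crc = (crc ^ (data << 8)) & 0xFFFF
        for _ in range(bits):
            if crc & 0x8000:
                crc = ((crc << 1) ^ 0x1021) & 0xFFFF
            else:
                crc = (crc << 1) & 0xFFFF
        return crc

    crc = 0x0000  # illustrative seed; the rx block's boost CRC object is declared with a zero initial remainder
    for byte in b"\xE7\xE7\xE7\xE7\xE7" + b"\x0F\x0F":
        crc = crc_update(crc, byte)
    print(hex(crc))
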
- * - * This is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this software; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - -#ifndef INCLUDED_NORDIC_NORDIC_TX_IMPL_H -#define INCLUDED_NORDIC_NORDIC_TX_IMPL_H - -#include -#include - -namespace gr { - namespace nordic { - - class nordic_tx_impl : public nordic_tx - { - private: - - // TX nordictap queue - std::queue m_tx_queue; - - // Lookup table to unpack bytes to bits (NRZ) - char m_unpack_table[256][8]; - - // Number of output channels - uint8_t m_channel_count; - - // Incoming message handler - void nordictap_message_handler(pmt::pmt_t msg); - - // Process a crc byte (or partial byte) - uint16_t crc_update (uint16_t crc, uint8_t data, uint8_t bits=8); - - public: - nordic_tx_impl(uint8_t channel_count); - ~nordic_tx_impl(); - - // Where all the action really happens - int work(int noutput_items, - gr_vector_const_void_star &input_items, - gr_vector_void_star &output_items); - }; - - } // namespace nordic -} // namespace gr - -#endif /* INCLUDED_NORDIC_NORDIC_TX_IMPL_H */ - diff --git a/lib/nordictap.h b/lib/nordictap.h deleted file mode 100644 index d9e4e52..0000000 --- a/lib/nordictap.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef NORDICTAP_H -#define NORDICTAP_H - -#define NORDICTAP_GSM_PORT 9451 - -// Data rates -#define NORDICTAP_RATE_250K 0 -#define NORDICTAP_RATE_1M 1 -#define NORDICTAP_RATE_2M 2 - -struct nordictap_header -{ - // Channel number - uint8_t channel; - - // Data rate - uint8_t data_rate; - - // Address length - uint8_t address_length; - - // Payload length - uint8_t payload_length; - - // Sequence number (ESB) - uint8_t sequence_number; - - // No ACK bit (ESB) - bool no_ack; - - // CRC length, in bytes - uint8_t crc_length; - -} __attribute__((packed)); - -#endif // NORDICTAP_H \ No newline at end of file diff --git a/lib/qa_nordic.cc b/lib/qa_nordic.cc deleted file mode 100644 index 14c4244..0000000 --- a/lib/qa_nordic.cc +++ /dev/null @@ -1,32 +0,0 @@ -/* - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -*/ - -/* - * This class gathers together all the test cases for the gr-filter - * directory into a single test suite. As you create new test cases, - * add them here. 
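
nordictap.h above defines the packed 7-byte header (all single-byte fields, so there is no padding or endianness concern) that nordic_rx_impl prepends to the address, payload and CRC before pushing each decoded frame over UDP to 127.0.0.1:9451, the port the bundled Wireshark dissector listens on. A rough Python sketch of that wire format, for example to replay frames into the dissector; everything here is illustrative rather than part of the module:

    import socket
    import struct

    NORDICTAP_PORT = 9451
    # channel, data_rate, address_length, payload_length, sequence_number, no_ack, crc_length
    HEADER = struct.Struct("7B")

    def build_nordictap(channel, data_rate, address, payload, crc, sequence_number=0, no_ack=0):
        """Concatenate header, address, payload and CRC the way nordic_rx_impl.cc does."""
        header = HEADER.pack(channel, data_rate, len(address), len(payload),
                             sequence_number, no_ack, len(crc))
        return header + bytes(address) + bytes(payload) + bytes(crc)

    frame = build_nordictap(4, 2, b"\xE7\xE7\xE7\xE7\xE7", b"\x0F\x0F", b"\x12\x34")
    socket.socket(socket.AF_INET, socket.SOCK_DGRAM).sendto(frame, ("127.0.0.1", NORDICTAP_PORT))
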
- */ - -#include "qa_nordic.h" - -CppUnit::TestSuite * -qa_nordic::suite() -{ - CppUnit::TestSuite *s = new CppUnit::TestSuite("nordic"); - - return s; -} diff --git a/lib/qa_nordic.h b/lib/qa_nordic.h deleted file mode 100644 index 9f57ecd..0000000 --- a/lib/qa_nordic.h +++ /dev/null @@ -1,34 +0,0 @@ -/* -*- c++ -*- */ -/* - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -*/ - -#ifndef _QA_NORDIC_H_ -#define _QA_NORDIC_H_ - -#include -#include - -//! collect all the tests for the gr-filter directory - -class __GR_ATTR_EXPORT qa_nordic -{ - public: - //! return suite of tests for all of gr-filter directory - static CppUnit::TestSuite *suite(); -}; - -#endif /* _QA_NORDIC_H_ */ diff --git a/lib/test_nordic.cc b/lib/test_nordic.cc deleted file mode 100644 index 2051b6b..0000000 --- a/lib/test_nordic.cc +++ /dev/null @@ -1,44 +0,0 @@ -/* -*- c++ -*- */ -/* - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -*/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include - -#include -#include "qa_nordic.h" -#include -#include - -int -main (int argc, char **argv) -{ - CppUnit::TextTestRunner runner; - std::ofstream xmlfile(get_unittest_path("nordic.xml").c_str()); - CppUnit::XmlOutputter *xmlout = new CppUnit::XmlOutputter(&runner.result(), xmlfile); - - runner.addTest(qa_nordic::suite()); - runner.setOutputter(xmlout); - - bool was_successful = runner.run("", false); - - return was_successful ? 0 : 1; -} diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt deleted file mode 100644 index 5e26f62..0000000 --- a/python/CMakeLists.txt +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -######################################################################## -# Include python install macros -######################################################################## -include(GrPython) -if(NOT PYTHONINTERP_FOUND) - return() -endif() - -######################################################################## -# Install python sources -######################################################################## -GR_PYTHON_INSTALL( - FILES - __init__.py - DESTINATION ${GR_PYTHON_DIR}/nordic -) - -######################################################################## -# Handle the unit tests -######################################################################## -include(GrTest) - -set(GR_TEST_TARGET_DEPS gnuradio-nordic) -set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig) diff --git a/python/__init__.py b/python/__init__.py deleted file mode 100644 index ec593b2..0000000 --- a/python/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' - -# The presence of this file turns this directory into a Python package - -''' -This is the GNU Radio NORDIC module. Place your Python package -description here (python/__init__.py). -''' - -# import swig generated symbols into the nordic namespace -try: - # this might fail if the module is python-only - from nordic_swig import * -except ImportError: - pass - -# import any pure python here - - -# diff --git a/python/build_utils.py b/python/build_utils.py deleted file mode 100644 index 6570080..0000000 --- a/python/build_utils.py +++ /dev/null @@ -1,218 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -''' - -"""Misc utilities used at build time -""" - -import re, os, os.path -from build_utils_codes import * - - -# set srcdir to the directory that contains Makefile.am -try: - srcdir = os.environ['srcdir'] -except KeyError, e: - srcdir = "." 
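
python/__init__.py above simply re-exports the SWIG bindings, so once the module is installed the blocks should be importable as the nordic package. A hedged sketch of instantiating them from Python, with the argument order taken from nordic_rx::make(channel, address_length, crc_length, data_rate) and nordic_tx::make(channel_count) earlier in this patch; the parameter values and the commented-out demodulator are placeholders, not a working flowgraph:

    from gnuradio import gr
    import nordic

    tb = gr.top_block()
    rx = nordic.nordic_rx(4, 5, 2, 2)  # channel, address length, CRC length, data rate (2 = 2 Mbps)
    tx = nordic.nordic_tx(1)           # single output channel
    # A GFSK demodulator producing one unpacked bit per byte would normally feed rx, e.g.:
    # tb.connect(demod, rx)
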
-srcdir = srcdir + '/' - -# set do_makefile to either true or false dependeing on the environment -try: - if os.environ['do_makefile'] == '0': - do_makefile = False - else: - do_makefile = True -except KeyError, e: - do_makefile = False - -# set do_sources to either true or false dependeing on the environment -try: - if os.environ['do_sources'] == '0': - do_sources = False - else: - do_sources = True -except KeyError, e: - do_sources = True - -name_dict = {} - -def log_output_name (name): - (base, ext) = os.path.splitext (name) - ext = ext[1:] # drop the leading '.' - - entry = name_dict.setdefault (ext, []) - entry.append (name) - -def open_and_log_name (name, dir): - global do_sources - if do_sources: - f = open (name, dir) - else: - f = None - log_output_name (name) - return f - -def expand_template (d, template_filename, extra = ""): - '''Given a dictionary D and a TEMPLATE_FILENAME, expand template into output file - ''' - global do_sources - output_extension = extract_extension (template_filename) - template = open_src (template_filename, 'r') - output_name = d['NAME'] + extra + '.' + output_extension - log_output_name (output_name) - if do_sources: - output = open (output_name, 'w') - do_substitution (d, template, output) - output.close () - template.close () - -def output_glue (dirname): - output_makefile_fragment () - output_ifile_include (dirname) - -def output_makefile_fragment (): - global do_makefile - if not do_makefile: - return -# overwrite the source, which must be writable; this should have been -# checked for beforehand in the top-level Makefile.gen.gen . - f = open (os.path.join (os.environ.get('gendir', os.environ.get('srcdir', '.')), 'Makefile.gen'), 'w') - f.write ('#\n# This file is machine generated. All edits will be overwritten\n#\n') - output_subfrag (f, 'h') - output_subfrag (f, 'i') - output_subfrag (f, 'cc') - f.close () - -def output_ifile_include (dirname): - global do_sources - if do_sources: - f = open ('%s_generated.i' % (dirname,), 'w') - f.write ('//\n// This file is machine generated. All edits will be overwritten\n//\n') - files = name_dict.setdefault ('i', []) - files.sort () - f.write ('%{\n') - for file in files: - f.write ('#include <%s>\n' % (file[0:-1] + 'h',)) - f.write ('%}\n\n') - for file in files: - f.write ('%%include <%s>\n' % (file,)) - -def output_subfrag (f, ext): - files = name_dict.setdefault (ext, []) - files.sort () - f.write ("GENERATED_%s =" % (ext.upper ())) - for file in files: - f.write (" \\\n\t%s" % (file,)) - f.write ("\n\n") - -def extract_extension (template_name): - # template name is something like: GrFIRfilterXXX.h.t - # we return everything between the penultimate . and .t - mo = re.search (r'\.([a-z]+)\.t$', template_name) - if not mo: - raise ValueError, "Incorrectly formed template_name '%s'" % (template_name,) - return mo.group (1) - -def open_src (name, mode): - global srcdir - return open (os.path.join (srcdir, name), mode) - -def do_substitution (d, in_file, out_file): - def repl (match_obj): - key = match_obj.group (1) - # print key - return d[key] - - inp = in_file.read () - out = re.sub (r"@([a-zA-Z0-9_]+)@", repl, inp) - out_file.write (out) - - - -copyright = '''/* -*- c++ -*- */ -/* - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -*/ -''' - -def is_complex (code3): - if i_code (code3) == 'c' or o_code (code3) == 'c': - return '1' - else: - return '0' - - -def standard_dict (name, code3, package='gr'): - d = {} - d['NAME'] = name - d['NAME_IMPL'] = name+'_impl' - d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) - d['GUARD_NAME_IMPL'] = 'INCLUDED_%s_%s_IMPL_H' % (package.upper(), name.upper()) - d['BASE_NAME'] = re.sub ('^' + package + '_', '', name) - d['SPTR_NAME'] = '%s_sptr' % name - d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten' - d['COPYRIGHT'] = copyright - d['TYPE'] = i_type (code3) - d['I_TYPE'] = i_type (code3) - d['O_TYPE'] = o_type (code3) - d['TAP_TYPE'] = tap_type (code3) - d['IS_COMPLEX'] = is_complex (code3) - return d - - -def standard_dict2 (name, code3, package): - d = {} - d['NAME'] = name - d['BASE_NAME'] = name - d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) - d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten' - d['COPYRIGHT'] = copyright - d['TYPE'] = i_type (code3) - d['I_TYPE'] = i_type (code3) - d['O_TYPE'] = o_type (code3) - d['TAP_TYPE'] = tap_type (code3) - d['IS_COMPLEX'] = is_complex (code3) - return d - -def standard_impl_dict2 (name, code3, package): - d = {} - d['NAME'] = name - d['IMPL_NAME'] = name - d['BASE_NAME'] = name.rstrip("impl").rstrip("_") - d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) - d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten' - d['COPYRIGHT'] = copyright - d['FIR_TYPE'] = "fir_filter_" + code3 - d['CFIR_TYPE'] = "fir_filter_" + code3[0:2] + 'c' - d['TYPE'] = i_type (code3) - d['I_TYPE'] = i_type (code3) - d['O_TYPE'] = o_type (code3) - d['TAP_TYPE'] = tap_type (code3) - d['IS_COMPLEX'] = is_complex (code3) - return d diff --git a/python/build_utils_codes.py b/python/build_utils_codes.py deleted file mode 100644 index fd7bd18..0000000 --- a/python/build_utils_codes.py +++ /dev/null @@ -1,48 +0,0 @@ -''' - Copyright (C) 2016 Bastille Networks - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . 
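
build_utils.py above appears to be the stock GNU Radio code-generation helper: expand_template() reads a *.t template and do_substitution() swaps every @KEY@ placeholder for the matching entry in a dictionary such as the one returned by standard_dict(). The file is written for Python 2 (old-style except/raise syntax); purely for reference, a Python 3 equivalent of the substitution step might look like the sketch below, where the sample template string is illustrative:

    import re

    def do_substitution(d, text):
        """Replace each @KEY@ placeholder in `text` with d['KEY']."""
        return re.sub(r"@([a-zA-Z0-9_]+)@", lambda m: d[m.group(1)], text)

    print(do_substitution({"NAME": "nordic_rx", "TYPE": "unsigned char"},
                          "class @NAME@ { @TYPE@ *buf; };"))
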
-''' - -def i_code (code3): - return code3[0] - -def o_code (code3): - if len (code3) >= 2: - return code3[1] - else: - return code3[0] - -def tap_code (code3): - if len (code3) >= 3: - return code3[2] - else: - return code3[0] - -def i_type (code3): - return char_to_type[i_code (code3)] - -def o_type (code3): - return char_to_type[o_code (code3)] - -def tap_type (code3): - return char_to_type[tap_code (code3)] - - -char_to_type = {} -char_to_type['s'] = 'short' -char_to_type['i'] = 'int' -char_to_type['f'] = 'float' -char_to_type['c'] = 'gr_complex' -char_to_type['b'] = 'unsigned char' diff --git a/swig/CMakeLists.txt b/swig/CMakeLists.txt deleted file mode 100644 index 277a0d3..0000000 --- a/swig/CMakeLists.txt +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2016 Bastille Networks -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -######################################################################## -# Check if there is C++ code at all -######################################################################## -if(NOT nordic_sources) - MESSAGE(STATUS "No C++ sources... skipping swig/") - return() -endif(NOT nordic_sources) - -######################################################################## -# Include swig generation macros -######################################################################## -find_package(SWIG) -find_package(PythonLibs 2) -if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND) - return() -endif() -include(GrSwig) -include(GrPython) - -######################################################################## -# Setup swig generation -######################################################################## -foreach(incdir ${GNURADIO_RUNTIME_INCLUDE_DIRS}) - list(APPEND GR_SWIG_INCLUDE_DIRS ${incdir}/gnuradio/swig) -endforeach(incdir) - -set(GR_SWIG_LIBRARIES gnuradio-nordic) -set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/nordic_swig_doc.i) -set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include) - -GR_SWIG_MAKE(nordic_swig nordic_swig.i) - -######################################################################## -# Install the build swig module -######################################################################## -GR_SWIG_INSTALL(TARGETS nordic_swig DESTINATION ${GR_PYTHON_DIR}/nordic) - -######################################################################## -# Install swig .i files for development -######################################################################## -install( - FILES - nordic_swig.i - ${CMAKE_CURRENT_BINARY_DIR}/nordic_swig_doc.i - DESTINATION ${GR_INCLUDE_DIR}/nordic/swig -) diff --git a/swig/nordic_swig.i b/swig/nordic_swig.i deleted file mode 100644 index e4ea82a..0000000 --- a/swig/nordic_swig.i +++ /dev/null @@ -1,19 +0,0 @@ -/* -*- c++ -*- */ - -#define NORDIC_API - -%include "gnuradio.i" // the common stuff - -//load generated python docstrings -%include "nordic_swig_doc.i" - -%{ -#include "nordic/nordic_rx.h" -#include "nordic/nordic_tx.h" -%} - - -%include 
"nordic/nordic_rx.h" -GR_SWIG_BLOCK_MAGIC2(nordic, nordic_rx); -%include "nordic/nordic_tx.h" -GR_SWIG_BLOCK_MAGIC2(nordic, nordic_tx); diff --git a/wireshark/nordic_dissector.lua b/wireshark/nordic_dissector.lua deleted file mode 100644 index 822cd99..0000000 --- a/wireshark/nordic_dissector.lua +++ /dev/null @@ -1,115 +0,0 @@ --- trivial protocol example --- declare our protocol -nordic_proto = Proto("nordic","Nordic Semiconductor nRF24L") --- create a function to dissect it -function nordic_proto.dissector(buffer,pinfo,tree) - pinfo.cols.protocol = "NORDIC" - local subtree = tree:add(nordic_proto,buffer(),"nRF24L Packet") - pinfo.cols.info = "" - - -- channel - local channel = buffer(0,1):uint() - - -- data rate - local data_rate = buffer(1,1):uint() - local data_rate_string = "" - if data_rate == 0 then data_rate_string = "250K" end - if data_rate == 1 then data_rate_string = "1M" end - if data_rate == 2 then data_rate_string = "2M" end - - -- address length - local address_length = buffer(2,1):uint() - - -- payload length - local payload_length = buffer(3,1):uint() - - -- sequence number - local sequence_number = buffer(4,1):uint() - - -- no ack bit - local no_ack = buffer(5,1):uint() - - -- crc length - local crc_length = buffer(6,1):uint() - - -- address - local address = buffer(7,address_length) - local address_bytes = address:bytes() - - -- payload - local payload = buffer(7+address_length,payload_length) - local payload_bytes = payload:bytes() - - -- crc - local crc = buffer(7+address_length+payload_length, crc_length) - - subtree:add(buffer(0,1), "Channel: " .. (2400+channel) .. "MHz") - subtree:add(buffer(1,1), "Data Rate: " .. data_rate_string) - subtree:add(buffer(2,1), "Address Length: " .. address_length) - subtree:add(buffer(3,1), "Payload Length: " .. payload_length) - subtree:add(buffer(4,1), "Sequence Number: " .. sequence_number) - subtree:add(buffer(5,1), "No ACK: " .. no_ack) - subtree:add(buffer(6,1), "CRC Length: " .. crc_length) - subtree:add(buffer(7,address_length), "Address: " .. address) - subtree:add(buffer(7+address_length,payload_length), "Payload: " .. payload) - subtree:add(buffer(7+address_length+payload_length, crc_length), "CRC: " .. 
crc) - - -- Keepalive (vendor agnostic) - if payload_bytes:len() == 0 then - pinfo.cols.info = "ACK" - end - - -- Microsoft - if bit.band(buffer(7,1):uint(), 0xF0) == 0xA0 and payload_length > 0 then - - -- Validate the checksum (AES encrypted series) - local sum = 0xFF - for x = 0, payload_bytes:len()-1 do - sum = bit.bxor(sum, payload_bytes:get_index(x)) - end - sum = bit.band(sum, 0xFF) - if sum == 0 then - - -- Microsoft keepalive - if payload_bytes:get_index(1) == 0x38 and payload_bytes:len() == 8 then - pinfo.cols.info = "Keepalive" - end - - -- Microsoft mouse movement/click - if payload_bytes:get_index(1) == 0x90 then - - local vtree = subtree:add(nordic_proto, buffer(), "Microsoft Movement/Click") - pinfo.cols.info = "Microsoft Mouse Movement/Click" - vtree:add(string.format("Device Type: 0x%02x", payload_bytes:get_index(2))) - - -- Movement X/Y - local movement_x = payload(9, 2):le_int() - local movement_y = payload(11, 2):le_int() - local scroll = payload(13, 2):le_int() - vtree:add(string.format("Movement X: %i", movement_x)) - vtree:add(string.format("Movement Y: %i", movement_y)) - vtree:add(string.format("Scroll Wheel: %i", scroll)) - - -- Button mask - local button_mask = payload(8, 1):uint() - vtree:add(string.format("Button 0: %i", bit.band(bit.rshift(button_mask, 0), 1))) - vtree:add(string.format("Button 1: %i", bit.band(bit.rshift(button_mask, 1), 1))) - vtree:add(string.format("Button 2: %i", bit.band(bit.rshift(button_mask, 2), 1))) - vtree:add(string.format("Button 3: %i", bit.band(bit.rshift(button_mask, 3), 1))) - vtree:add(string.format("Button 4: %i", bit.band(bit.rshift(button_mask, 4), 1))) - vtree:add(string.format("Button 5: %i", bit.band(bit.rshift(button_mask, 5), 1))) - vtree:add(string.format("Button 6: %i", bit.band(bit.rshift(button_mask, 6), 1))) - vtree:add(string.format("Button 7: %i", bit.band(bit.rshift(button_mask, 7), 1))) - - end - - end - end - -end - --- load the udp.port table -udp_table = DissectorTable.get("udp.port") - --- register our protocol to handle udp port 7777 -udp_table:add(9451,nordic_proto) \ No newline at end of file From a6b0e94bee94dfe0628e23571f24b6535093e239 Mon Sep 17 00:00:00 2001 From: hboeglen <60876763+hboeglen@users.noreply.github.com> Date: Thu, 24 Jun 2021 08:42:06 +0200 Subject: [PATCH 2/2] Version 3.7 --- .gitignore | 2 + CMakeLists.txt | 181 + LICENSE | 674 ++ MANIFEST.md | 16 + README.md | 67 + apps/CMakeLists.txt | 21 + cmake/Modules/CMakeParseArgumentsCopy.cmake | 138 + cmake/Modules/FindCppUnit.cmake | 39 + cmake/Modules/FindGnuradioRuntime.cmake | 36 + cmake/Modules/GrMiscUtils.cmake | 521 + cmake/Modules/GrPlatform.cmake | 50 + cmake/Modules/GrPython.cmake | 237 + cmake/Modules/GrSwig.cmake | 247 + cmake/Modules/GrTest.cmake | 139 + cmake/Modules/UseSWIG.cmake | 304 + cmake/Modules/nordicConfig.cmake | 30 + cmake/cmake_uninstall.cmake.in | 32 + docs/CMakeLists.txt | 31 + docs/README.nordic | 11 + docs/doxygen/CMakeLists.txt | 48 + docs/doxygen/Doxyfile.in | 1922 ++++ docs/doxygen/Doxyfile.swig_doc.in | 1890 ++++ docs/doxygen/doxyxml/__init__.py | 79 + docs/doxygen/doxyxml/base.py | 216 + docs/doxygen/doxyxml/doxyindex.py | 234 + docs/doxygen/doxyxml/generated/__init__.py | 7 + docs/doxygen/doxyxml/generated/compound.py | 503 + .../doxyxml/generated/compoundsuper.py | 8342 +++++++++++++++++ docs/doxygen/doxyxml/generated/index.py | 77 + docs/doxygen/doxyxml/generated/indexsuper.py | 523 ++ docs/doxygen/doxyxml/text.py | 53 + docs/doxygen/other/group_defs.dox | 7 + docs/doxygen/other/main_page.dox | 10 + 
docs/doxygen/swig_doc.py | 252 + examples/Frame_Sync_Example.grc | 1311 +++ examples/HackRF_RTLSDR_loopback_test.grc | 5622 +++++++++++ examples/epy_block_0.py | 38 + examples/fmmod.dat | Bin 0 -> 640512 bytes examples/gaussian_filter.dat | Bin 0 -> 319488 bytes examples/microsoft_mouse_sniffer.py | 171 + examples/nordic_auto_ack.py | 170 + examples/nordic_channelized_receiver.py | 135 + examples/nordic_channelized_transmitter.py | 110 + examples/nordic_receiver.py | 136 + examples/nordic_sniffer_scanner.py | 142 + examples/nordic_transmitter.py | 148 + examples/nordictap_test.grc | 215 + examples/nordictap_test.py | 108 + examples/nrf24_receiver.grc | 3276 +++++++ examples/nrf24_receiver.py | 238 + examples/nrf24_tx.grc | 419 + examples/nrf_scan.grc | 3008 ++++++ examples/nrf_scan.py | 234 + examples/nrz.dat | Bin 0 -> 160128 bytes examples/pluto_rx.grc | 749 ++ examples/pluto_rx.py | 164 + examples/simple_qt_gui.grc | 511 + examples/simple_qt_gui.py | 153 + examples/test_gfsk_mod.grc | 348 + examples/test_gfsk_mod.py | 139 + examples/test_gui.py | 56 + examples/top_block.py | 392 + examples/tx_nrf.grc | 2575 +++++ examples/tx_nrf.py | 245 + examples/tx_nrf_python_block.grc | 2756 ++++++ examples/tx_nrf_python_block.py | 271 + examples/tx_rx_loop.grc | 946 ++ examples/tx_rx_loop.py | 196 + grc/CMakeLists.txt | 20 + grc/nordic_nordic_rx.xml | 40 + grc/nordic_nordic_tx.xml | 26 + grc/nordic_nordictap_printer.xml | 13 + grc/nordic_nordictap_transmitter.xml | 46 + grc/nordictap_transmitter.xml | 37 + include/nordic/CMakeLists.txt | 24 + include/nordic/api.h | 29 + include/nordic/nordic_rx.h | 63 + include/nordic/nordic_tx.h | 56 + lib/CMakeLists.txt | 76 + lib/bit_shifting_byte_vector.cc | 79 + lib/bit_shifting_byte_vector.h | 72 + lib/enhanced_shockburst_packet.cc | 178 + lib/enhanced_shockburst_packet.h | 102 + lib/nordic_rx_impl.cc | 152 + lib/nordic_rx_impl.h | 74 + lib/nordic_tx_impl.cc | 173 + lib/nordic_tx_impl.h | 63 + lib/nordictap.h | 36 + lib/qa_nordic.cc | 32 + lib/qa_nordic.h | 34 + lib/test_nordic.cc | 44 + python/CMakeLists.txt | 40 + python/__init__.py | 37 + python/build_utils.py | 218 + python/build_utils_codes.py | 48 + python/nordic_blocks.py | 100 + swig/CMakeLists.txt | 61 + swig/nordic_swig.i | 19 + wireshark/nordic_dissector.lua | 115 + 99 files changed, 44028 insertions(+) create mode 100644 .gitignore create mode 100644 CMakeLists.txt create mode 100644 LICENSE create mode 100644 MANIFEST.md create mode 100644 README.md create mode 100644 apps/CMakeLists.txt create mode 100644 cmake/Modules/CMakeParseArgumentsCopy.cmake create mode 100644 cmake/Modules/FindCppUnit.cmake create mode 100644 cmake/Modules/FindGnuradioRuntime.cmake create mode 100644 cmake/Modules/GrMiscUtils.cmake create mode 100644 cmake/Modules/GrPlatform.cmake create mode 100644 cmake/Modules/GrPython.cmake create mode 100644 cmake/Modules/GrSwig.cmake create mode 100644 cmake/Modules/GrTest.cmake create mode 100644 cmake/Modules/UseSWIG.cmake create mode 100644 cmake/Modules/nordicConfig.cmake create mode 100644 cmake/cmake_uninstall.cmake.in create mode 100644 docs/CMakeLists.txt create mode 100644 docs/README.nordic create mode 100644 docs/doxygen/CMakeLists.txt create mode 100644 docs/doxygen/Doxyfile.in create mode 100644 docs/doxygen/Doxyfile.swig_doc.in create mode 100644 docs/doxygen/doxyxml/__init__.py create mode 100644 docs/doxygen/doxyxml/base.py create mode 100644 docs/doxygen/doxyxml/doxyindex.py create mode 100644 docs/doxygen/doxyxml/generated/__init__.py create mode 100644 
docs/doxygen/doxyxml/generated/compound.py create mode 100644 docs/doxygen/doxyxml/generated/compoundsuper.py create mode 100644 docs/doxygen/doxyxml/generated/index.py create mode 100644 docs/doxygen/doxyxml/generated/indexsuper.py create mode 100644 docs/doxygen/doxyxml/text.py create mode 100644 docs/doxygen/other/group_defs.dox create mode 100644 docs/doxygen/other/main_page.dox create mode 100644 docs/doxygen/swig_doc.py create mode 100644 examples/Frame_Sync_Example.grc create mode 100644 examples/HackRF_RTLSDR_loopback_test.grc create mode 100644 examples/epy_block_0.py create mode 100644 examples/fmmod.dat create mode 100644 examples/gaussian_filter.dat create mode 100644 examples/microsoft_mouse_sniffer.py create mode 100644 examples/nordic_auto_ack.py create mode 100644 examples/nordic_channelized_receiver.py create mode 100644 examples/nordic_channelized_transmitter.py create mode 100644 examples/nordic_receiver.py create mode 100644 examples/nordic_sniffer_scanner.py create mode 100644 examples/nordic_transmitter.py create mode 100644 examples/nordictap_test.grc create mode 100644 examples/nordictap_test.py create mode 100644 examples/nrf24_receiver.grc create mode 100644 examples/nrf24_receiver.py create mode 100644 examples/nrf24_tx.grc create mode 100644 examples/nrf_scan.grc create mode 100644 examples/nrf_scan.py create mode 100644 examples/nrz.dat create mode 100644 examples/pluto_rx.grc create mode 100644 examples/pluto_rx.py create mode 100644 examples/simple_qt_gui.grc create mode 100644 examples/simple_qt_gui.py create mode 100644 examples/test_gfsk_mod.grc create mode 100644 examples/test_gfsk_mod.py create mode 100644 examples/test_gui.py create mode 100644 examples/top_block.py create mode 100644 examples/tx_nrf.grc create mode 100644 examples/tx_nrf.py create mode 100644 examples/tx_nrf_python_block.grc create mode 100644 examples/tx_nrf_python_block.py create mode 100644 examples/tx_rx_loop.grc create mode 100644 examples/tx_rx_loop.py create mode 100644 grc/CMakeLists.txt create mode 100644 grc/nordic_nordic_rx.xml create mode 100644 grc/nordic_nordic_tx.xml create mode 100644 grc/nordic_nordictap_printer.xml create mode 100644 grc/nordic_nordictap_transmitter.xml create mode 100644 grc/nordictap_transmitter.xml create mode 100644 include/nordic/CMakeLists.txt create mode 100644 include/nordic/api.h create mode 100644 include/nordic/nordic_rx.h create mode 100644 include/nordic/nordic_tx.h create mode 100644 lib/CMakeLists.txt create mode 100644 lib/bit_shifting_byte_vector.cc create mode 100644 lib/bit_shifting_byte_vector.h create mode 100644 lib/enhanced_shockburst_packet.cc create mode 100644 lib/enhanced_shockburst_packet.h create mode 100644 lib/nordic_rx_impl.cc create mode 100644 lib/nordic_rx_impl.h create mode 100644 lib/nordic_tx_impl.cc create mode 100644 lib/nordic_tx_impl.h create mode 100644 lib/nordictap.h create mode 100644 lib/qa_nordic.cc create mode 100644 lib/qa_nordic.h create mode 100644 lib/test_nordic.cc create mode 100644 python/CMakeLists.txt create mode 100644 python/__init__.py create mode 100644 python/build_utils.py create mode 100644 python/build_utils_codes.py create mode 100644 python/nordic_blocks.py create mode 100644 swig/CMakeLists.txt create mode 100644 swig/nordic_swig.i create mode 100644 wireshark/nordic_dissector.lua diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b92d662 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +build/ +*.pyc diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 
100644 index 0000000..3874c97 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,181 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +######################################################################## +# Project setup +######################################################################## +cmake_minimum_required(VERSION 2.6) +project(gr-nordic CXX C) +enable_testing() + +#select the release build type by default to get optimization flags +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release") + message(STATUS "Build type not specified: defaulting to release.") +endif(NOT CMAKE_BUILD_TYPE) +set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "") + +#make sure our local CMake Modules path comes first +list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_SOURCE_DIR}/cmake/Modules) + +######################################################################## +# Compiler specific setup +######################################################################## +if(CMAKE_COMPILER_IS_GNUCXX AND NOT WIN32) + #http://gcc.gnu.org/wiki/Visibility + add_definitions(-fvisibility=hidden) +endif() + +######################################################################## +# Find boost +######################################################################## +if(UNIX AND EXISTS "/usr/lib64") + list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix +endif(UNIX AND EXISTS "/usr/lib64") +set(Boost_ADDITIONAL_VERSIONS + "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39" + "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44" + "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49" + "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54" + "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59" + "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64" + "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69" +) +find_package(Boost "1.35" COMPONENTS filesystem system) + +if(NOT Boost_FOUND) + message(FATAL_ERROR "Boost required to compile nordic") +endif() + +######################################################################## +# Install directories +######################################################################## +include(GrPlatform) #define LIB_SUFFIX +set(GR_RUNTIME_DIR bin) +set(GR_LIBRARY_DIR lib${LIB_SUFFIX}) +set(GR_INCLUDE_DIR include/nordic) +set(GR_DATA_DIR share) +set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME}) +set(GR_DOC_DIR ${GR_DATA_DIR}/doc) +set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME}) +set(GR_CONF_DIR etc) +set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d) +set(GR_LIBEXEC_DIR libexec) +set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME}) +set(GRC_BLOCKS_DIR ${GR_PKG_DATA_DIR}/grc/blocks) + 
+######################################################################## +# On Apple only, set install name and use rpath correctly, if not already set +######################################################################## +if(APPLE) + if(NOT CMAKE_INSTALL_NAME_DIR) + set(CMAKE_INSTALL_NAME_DIR + ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE + PATH "Library Install Name Destination Directory" FORCE) + endif(NOT CMAKE_INSTALL_NAME_DIR) + if(NOT CMAKE_INSTALL_RPATH) + set(CMAKE_INSTALL_RPATH + ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE + PATH "Library Install RPath" FORCE) + endif(NOT CMAKE_INSTALL_RPATH) + if(NOT CMAKE_BUILD_WITH_INSTALL_RPATH) + set(CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE + BOOL "Do Build Using Library Install RPath" FORCE) + endif(NOT CMAKE_BUILD_WITH_INSTALL_RPATH) +endif(APPLE) + +######################################################################## +# Find gnuradio build dependencies +######################################################################## +find_package(CppUnit) +find_package(Doxygen) + +# Search for GNU Radio and its components and versions. Add any +# components required to the list of GR_REQUIRED_COMPONENTS (in all +# caps such as FILTER or FFT) and change the version to the minimum +# API compatible version required. +set(GR_REQUIRED_COMPONENTS RUNTIME) +find_package(Gnuradio "3.7.2" REQUIRED) + +if(NOT CPPUNIT_FOUND) + message(FATAL_ERROR "CppUnit required to compile nordic") +endif() + +######################################################################## +# Setup doxygen option +######################################################################## +if(DOXYGEN_FOUND) + option(ENABLE_DOXYGEN "Build docs using Doxygen" ON) +else(DOXYGEN_FOUND) + option(ENABLE_DOXYGEN "Build docs using Doxygen" OFF) +endif(DOXYGEN_FOUND) + +######################################################################## +# Setup the include and linker paths +######################################################################## +include_directories( + ${CMAKE_SOURCE_DIR}/lib + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_BINARY_DIR}/lib + ${CMAKE_BINARY_DIR}/include + ${Boost_INCLUDE_DIRS} + ${CPPUNIT_INCLUDE_DIRS} + ${GNURADIO_ALL_INCLUDE_DIRS} +) + +link_directories( + ${Boost_LIBRARY_DIRS} + ${CPPUNIT_LIBRARY_DIRS} + ${GNURADIO_RUNTIME_LIBRARY_DIRS} +) + +# Set component parameters +set(GR_NORDIC_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE INTERNAL "" FORCE) +set(GR_NORDIC_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/swig CACHE INTERNAL "" FORCE) + +######################################################################## +# Create uninstall target +######################################################################## +configure_file( + ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake +@ONLY) + +add_custom_target(uninstall + ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake +) + +######################################################################## +# Add subdirectories +######################################################################## +add_subdirectory(include/nordic) +add_subdirectory(lib) +add_subdirectory(swig) +add_subdirectory(python) +add_subdirectory(grc) +add_subdirectory(apps) +add_subdirectory(docs) + +######################################################################## +# Install cmake search helper for this library +######################################################################## +if(NOT CMAKE_MODULES_DIR) + set(CMAKE_MODULES_DIR 
lib${LIB_SUFFIX}/cmake) +endif(NOT CMAKE_MODULES_DIR) + +install(FILES cmake/Modules/nordicConfig.cmake + DESTINATION ${CMAKE_MODULES_DIR}/nordic +) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..9cecc1d --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/MANIFEST.md b/MANIFEST.md new file mode 100644 index 0000000..49f9201 --- /dev/null +++ b/MANIFEST.md @@ -0,0 +1,16 @@ +title: The NORDIC OOT Module +brief: Short description of gr-nordic +tags: # Tags are arbitrary, but look at CGRAN what other authors are using + - sdr +author: + - Author Name +copyright_owner: + - Copyright Owner 1 +license: +#repo: # Put the URL of the repository here, or leave blank for default +#website: # If you have a separate project website, put it here +#icon: # Put a URL to a square image here that will be used as an icon on CGRAN +--- +A longer, multi-line description of gr-nordic. +You may use some *basic* Markdown here. +If left empty, it will try to find a README file instead. diff --git a/README.md b/README.md new file mode 100644 index 0000000..4afb6ab --- /dev/null +++ b/README.md @@ -0,0 +1,67 @@ +# gr-nordic + +GNU Radio module and Wireshark dissector for the Nordic Semiconductor nRF24L Enhanced Shockburst protocol. 
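+
+A rough single-channel receive flowgraph looks like the sketch below. It is illustrative only: the `nordic.nordic_rx` argument order is an assumption derived from the option names used by the bundled examples, so check the block's header or GRC definition for the actual signature before copying it.
+
+```python
+#!/usr/bin/env python
+# Sketch: single-channel Enhanced Shockburst receive chain.
+from gnuradio import gr, digital
+import osmosdr
+import nordic
+
+class sketch_rx(gr.top_block):
+    def __init__(self, channel=4, data_rate=2e6, sps=2):
+        gr.top_block.__init__(self)
+
+        # SDR source tuned to 2400 MHz + channel MHz
+        src = osmosdr.source()
+        src.set_sample_rate(sps * data_rate)
+        src.set_center_freq(2400e6 + channel * 1e6)
+        src.set_gain(40)
+
+        # GFSK demodulator producing the demodulated bitstream nordic_rx expects
+        demod = digital.gfsk_demod(samples_per_symbol=sps)
+
+        # Hypothetical argument order (channel, address length, CRC length,
+        # data rate); verify against include/nordic/nordic_rx.h
+        rx = nordic.nordic_rx(channel, 5, 2, data_rate)
+
+        self.connect(src, demod, rx)
+
+if __name__ == '__main__':
+    sketch_rx().run()
+```
+
+The transmit path mirrors this chain, feeding the `nordic_tx` byte stream into a GFSK modulator and an osmosdr sink.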
+
+## external c++ classes
+
+### nordic_rx
+
+Receiver class which consumes a GFSK demodulated bitstream and reconstructs Enhanced Shockburst packets. PDUs are printed to standard out and sent to Wireshark.
+
+### nordic_tx
+
+Transmitter class which consumes nordictap structs, generates Enhanced Shockburst packets, and produces a byte stream to be fed to a GFSK modulator.
+
+## python examples
+
+All python examples use the osmosdr_source/osmosdr_sink blocks, and are SDR-agnostic.
+
+### nordic_receiver.py
+
+Single channel receiver. Listening on channel 4 (2404MHz) with a 2Mbps data rate, 5 byte address, and 2 byte CRC is invoked as follows:
+
+```./nordic_receiver.py --channel 4 --data_rate 2e6 --crc_length 2 --address_length 5 --samples_per_symbol 2 --gain 40```
+
+### nordic_auto_ack.py
+
+Single channel receiver with auto-ACK. Listening (and ACKing) on channel 4 (2404MHz) with a 2Mbps data rate, 5 byte address, and 2 byte CRC is invoked as follows:
+
+```./nordic_auto_ack.py --channel 4 --data_rate 2e6 --crc_length 2 --address_length 5 --samples_per_symbol 2 --gain 40```
+
+### nordic_sniffer_scanner.py
+
+Sweeping single channel receiver which scans channels 2-83 looking for Enhanced Shockburst packets. During receive activity, it camps on a given channel until it goes idle.
+
+```./nordic_sniffer_scanner.py ```
+
+### microsoft_mouse_sniffer.py
+
+Microsoft mouse/keyboard following receiver. When launched, this script sweeps across the 24 possible Microsoft wireless keyboard/mouse channels. When a device is found, it switches to that device's 4-channel group, sweeping between that set to follow the device.
+
+```./microsoft_mouse_sniffer.py ```
+
+### nordic_channelized_receiver.py
+
+Channelized receiver example, which tunes to 2414MHz and receives 2Mbps Enhanced Shockburst packets on channels 10, 14, and 18.
+
+```./nordic_channelized_receiver.py ```
+
+### nordic_channelized_transmitter.py
+
+Channelized transmitter example, which tunes to 2414MHz and transmits 2Mbps Enhanced Shockburst packets on channels 10, 14, and 18.
+
+```./nordic_channelized_transmitter.py ```
+
+## wireshark dissector
+
+The Wireshark dissector displays Enhanced Shockburst packets in Wireshark. The dissector logic is straightforward and simple to extend to classify additional device types.
+
+### wireshark/nordic_dissector.lua
+
+```wireshark -X lua_script:wireshark/nordic_dissector.lua -i lo -k -f udp ```
+
+## nRF24LU1+ research firmware
+
+Corresponding research firmware for the nRF24LU1+ chips (including Logitech Unifying dongles) is available [here](https://github.com/BastilleResearch/nrf-research-firmware/).
+
+Documentation on the packet formats covered by the MouseJack and KeySniffer vulnerability sets is available [here](https://github.com/BastilleResearch/mousejack/tree/master/doc/pdf).
diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt
new file mode 100644
index 0000000..d63bda1
--- /dev/null
+++ b/apps/CMakeLists.txt
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 Bastille Networks
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +include(GrPython) + +GR_PYTHON_INSTALL( + PROGRAMS + DESTINATION bin +) diff --git a/cmake/Modules/CMakeParseArgumentsCopy.cmake b/cmake/Modules/CMakeParseArgumentsCopy.cmake new file mode 100644 index 0000000..7ce4c49 --- /dev/null +++ b/cmake/Modules/CMakeParseArgumentsCopy.cmake @@ -0,0 +1,138 @@ +# CMAKE_PARSE_ARGUMENTS( args...) +# +# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for +# parsing the arguments given to that macro or function. +# It processes the arguments and defines a set of variables which hold the +# values of the respective options. +# +# The argument contains all options for the respective macro, +# i.e. keywords which can be used when calling the macro without any value +# following, like e.g. the OPTIONAL keyword of the install() command. +# +# The argument contains all keywords for this macro +# which are followed by one value, like e.g. DESTINATION keyword of the +# install() command. +# +# The argument contains all keywords for this macro +# which can be followed by more than one value, like e.g. the TARGETS or +# FILES keywords of the install() command. +# +# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the +# keywords listed in , and +# a variable composed of the given +# followed by "_" and the name of the respective keyword. +# These variables will then hold the respective value from the argument list. +# For the keywords this will be TRUE or FALSE. +# +# All remaining arguments are collected in a variable +# _UNPARSED_ARGUMENTS, this can be checked afterwards to see whether +# your macro was called with unrecognized parameters. +# +# As an example here a my_install() macro, which takes similar arguments as the +# real install() command: +# +# function(MY_INSTALL) +# set(options OPTIONAL FAST) +# set(oneValueArgs DESTINATION RENAME) +# set(multiValueArgs TARGETS CONFIGURATIONS) +# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) +# ... +# +# Assume my_install() has been called like this: +# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub) +# +# After the cmake_parse_arguments() call the macro will have set the following +# variables: +# MY_INSTALL_OPTIONAL = TRUE +# MY_INSTALL_FAST = FALSE (this option was not used when calling my_install() +# MY_INSTALL_DESTINATION = "bin" +# MY_INSTALL_RENAME = "" (was not used) +# MY_INSTALL_TARGETS = "foo;bar" +# MY_INSTALL_CONFIGURATIONS = "" (was not used) +# MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL" +# +# You can the continue and process these variables. +# +# Keywords terminate lists of values, e.g. if directly after a one_value_keyword +# another recognized keyword follows, this is interpreted as the beginning of +# the new option. +# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would result in +# MY_INSTALL_DESTINATION set to "OPTIONAL", but MY_INSTALL_DESTINATION would +# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefor. + +#============================================================================= +# Copyright 2010 Alexander Neundorf +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file Copyright.txt for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. 
+#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + + +if(__CMAKE_PARSE_ARGUMENTS_INCLUDED) + return() +endif() +set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE) + + +function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames) + # first set all result variables to empty/FALSE + foreach(arg_name ${_singleArgNames} ${_multiArgNames}) + set(${prefix}_${arg_name}) + endforeach(arg_name) + + foreach(option ${_optionNames}) + set(${prefix}_${option} FALSE) + endforeach(option) + + set(${prefix}_UNPARSED_ARGUMENTS) + + set(insideValues FALSE) + set(currentArgName) + + # now iterate over all arguments and fill the result variables + foreach(currentArg ${ARGN}) + list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword + list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword + list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword + + if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1) + if(insideValues) + if("${insideValues}" STREQUAL "SINGLE") + set(${prefix}_${currentArgName} ${currentArg}) + set(insideValues FALSE) + elseif("${insideValues}" STREQUAL "MULTI") + list(APPEND ${prefix}_${currentArgName} ${currentArg}) + endif() + else(insideValues) + list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg}) + endif(insideValues) + else() + if(NOT ${optionIndex} EQUAL -1) + set(${prefix}_${currentArg} TRUE) + set(insideValues FALSE) + elseif(NOT ${singleArgIndex} EQUAL -1) + set(currentArgName ${currentArg}) + set(${prefix}_${currentArgName}) + set(insideValues "SINGLE") + elseif(NOT ${multiArgIndex} EQUAL -1) + set(currentArgName ${currentArg}) + set(${prefix}_${currentArgName}) + set(insideValues "MULTI") + endif() + endif() + + endforeach(currentArg) + + # propagate the result variables to the caller: + foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames}) + set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE) + endforeach(arg_name) + set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE) + +endfunction(CMAKE_PARSE_ARGUMENTS _options _singleArgs _multiArgs) diff --git a/cmake/Modules/FindCppUnit.cmake b/cmake/Modules/FindCppUnit.cmake new file mode 100644 index 0000000..f93ade3 --- /dev/null +++ b/cmake/Modules/FindCppUnit.cmake @@ -0,0 +1,39 @@ +# http://www.cmake.org/pipermail/cmake/2006-October/011446.html +# Modified to use pkg config and use standard var names + +# +# Find the CppUnit includes and library +# +# This module defines +# CPPUNIT_INCLUDE_DIR, where to find tiff.h, etc. +# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit. +# CPPUNIT_FOUND, If false, do not try to use CppUnit. 
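+#
+# Typical usage (a sketch; the target name is a placeholder):
+#
+#   find_package(CppUnit REQUIRED)
+#   include_directories(${CPPUNIT_INCLUDE_DIRS})
+#   target_link_libraries(my_qa_test ${CPPUNIT_LIBRARIES})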
+ +INCLUDE(FindPkgConfig) +PKG_CHECK_MODULES(PC_CPPUNIT "cppunit") + +FIND_PATH(CPPUNIT_INCLUDE_DIRS + NAMES cppunit/TestCase.h + HINTS ${PC_CPPUNIT_INCLUDE_DIR} + ${CMAKE_INSTALL_PREFIX}/include + PATHS + /usr/local/include + /usr/include +) + +FIND_LIBRARY(CPPUNIT_LIBRARIES + NAMES cppunit + HINTS ${PC_CPPUNIT_LIBDIR} + ${CMAKE_INSTALL_PREFIX}/lib + ${CMAKE_INSTALL_PREFIX}/lib64 + PATHS + ${CPPUNIT_INCLUDE_DIRS}/../lib + /usr/local/lib + /usr/lib +) + +LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS}) + +INCLUDE(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) +MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) diff --git a/cmake/Modules/FindGnuradioRuntime.cmake b/cmake/Modules/FindGnuradioRuntime.cmake new file mode 100644 index 0000000..afed684 --- /dev/null +++ b/cmake/Modules/FindGnuradioRuntime.cmake @@ -0,0 +1,36 @@ +INCLUDE(FindPkgConfig) +PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime) + +if(PC_GNURADIO_RUNTIME_FOUND) + # look for include files + FIND_PATH( + GNURADIO_RUNTIME_INCLUDE_DIRS + NAMES gnuradio/top_block.h + HINTS $ENV{GNURADIO_RUNTIME_DIR}/include + ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS} + ${CMAKE_INSTALL_PREFIX}/include + PATHS /usr/local/include + /usr/include + ) + + # look for libs + FIND_LIBRARY( + GNURADIO_RUNTIME_LIBRARIES + NAMES gnuradio-runtime + HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib + ${PC_GNURADIO_RUNTIME_LIBDIR} + ${CMAKE_INSTALL_PREFIX}/lib/ + ${CMAKE_INSTALL_PREFIX}/lib64/ + PATHS /usr/local/lib + /usr/local/lib64 + /usr/lib + /usr/lib64 + ) + + set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND}) +endif(PC_GNURADIO_RUNTIME_FOUND) + +INCLUDE(FindPackageHandleStandardArgs) +# do not check GNURADIO_RUNTIME_INCLUDE_DIRS, is not set when default include path us used. +FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES) +MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS) diff --git a/cmake/Modules/GrMiscUtils.cmake b/cmake/Modules/GrMiscUtils.cmake new file mode 100644 index 0000000..7b8b7ed --- /dev/null +++ b/cmake/Modules/GrMiscUtils.cmake @@ -0,0 +1,521 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE) + return() +endif() +set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE) + +######################################################################## +# Set global variable macro. +# Used for subdirectories to export settings. +# Example: include and library paths. +######################################################################## +function(GR_SET_GLOBAL var) + set(${var} ${ARGN} CACHE INTERNAL "" FORCE) +endfunction(GR_SET_GLOBAL) + +######################################################################## +# Set the pre-processor definition if the condition is true. 
+# - def the pre-processor definition to set and condition name +######################################################################## +function(GR_ADD_COND_DEF def) + if(${def}) + add_definitions(-D${def}) + endif(${def}) +endfunction(GR_ADD_COND_DEF) + +######################################################################## +# Check for a header and conditionally set a compile define. +# - hdr the relative path to the header file +# - def the pre-processor definition to set +######################################################################## +function(GR_CHECK_HDR_N_DEF hdr def) + include(CheckIncludeFileCXX) + CHECK_INCLUDE_FILE_CXX(${hdr} ${def}) + GR_ADD_COND_DEF(${def}) +endfunction(GR_CHECK_HDR_N_DEF) + +######################################################################## +# Include subdirectory macro. +# Sets the CMake directory variables, +# includes the subdirectory CMakeLists.txt, +# resets the CMake directory variables. +# +# This macro includes subdirectories rather than adding them +# so that the subdirectory can affect variables in the level above. +# This provides a work-around for the lack of convenience libraries. +# This way a subdirectory can append to the list of library sources. +######################################################################## +macro(GR_INCLUDE_SUBDIRECTORY subdir) + #insert the current directories on the front of the list + list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR}) + list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR}) + + #set the current directories to the names of the subdirs + set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}) + set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir}) + + #include the subdirectory CMakeLists to run it + file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt) + + #reset the value of the current directories + list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR) + list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR) + + #pop the subdir names of the front of the list + list(REMOVE_AT _cmake_source_dirs 0) + list(REMOVE_AT _cmake_binary_dirs 0) +endmacro(GR_INCLUDE_SUBDIRECTORY) + +######################################################################## +# Check if a compiler flag works and conditionally set a compile define. +# - flag the compiler flag to check for +# - have the variable to set with result +######################################################################## +macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have) + include(CheckCXXCompilerFlag) + CHECK_CXX_COMPILER_FLAG(${flag} ${have}) + if(${have}) + if(${CMAKE_VERSION} VERSION_GREATER "2.8.4") + STRING(FIND "${CMAKE_CXX_FLAGS}" "${flag}" flag_dup) + if(${flag_dup} EQUAL -1) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") + endif(${flag_dup} EQUAL -1) + endif(${CMAKE_VERSION} VERSION_GREATER "2.8.4") + endif(${have}) +endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE) + +######################################################################## +# Generates the .la libtool file +# This appears to generate libtool files that cannot be used by auto*. +# Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest]) +# Notice: there is not COMPONENT option, these will not get distributed. 
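+# Example (a sketch; in this tree the macro is normally reached through
+# GR_LIBRARY_FOO below, and nothing is generated unless GENERATE_LIBTOOL
+# is switched ON):
+#   GR_LIBTOOL(TARGET <library-target> DESTINATION ${GR_LIBRARY_DIR})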
+######################################################################## +function(GR_LIBTOOL) + if(NOT DEFINED GENERATE_LIBTOOL) + set(GENERATE_LIBTOOL OFF) #disabled by default + endif() + + if(GENERATE_LIBTOOL) + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN}) + + find_program(LIBTOOL libtool) + if(LIBTOOL) + include(CMakeMacroLibtoolFile) + CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION}) + endif(LIBTOOL) + endif(GENERATE_LIBTOOL) + +endfunction(GR_LIBTOOL) + +######################################################################## +# Do standard things to the library target +# - set target properties +# - make install rules +# Also handle gnuradio custom naming conventions w/ extras mode. +######################################################################## +function(GR_LIBRARY_FOO target) + #parse the arguments for component names + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN}) + + #set additional target properties + set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER}) + + #install the generated files like so... + install(TARGETS ${target} + LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file + ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file + RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file + ) + + #extras mode enabled automatically on linux + if(NOT DEFINED LIBRARY_EXTRAS) + set(LIBRARY_EXTRAS ${LINUX}) + endif() + + #special extras mode to enable alternative naming conventions + if(LIBRARY_EXTRAS) + + #create .la file before changing props + GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR}) + + #give the library a special name with ultra-zero soversion + set_target_properties(${target} PROPERTIES OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0") + set(target_name lib${target}-${LIBVER}.so.0.0.0) + + #custom command to generate symlinks + add_custom_command( + TARGET ${target} + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so + COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 + COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install + ) + + #and install the extra symlinks + install( + FILES + ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so + ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 + DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} + ) + + endif(LIBRARY_EXTRAS) +endfunction(GR_LIBRARY_FOO) + +######################################################################## +# Create a dummy custom command that depends on other targets. +# Usage: +# GR_GEN_TARGET_DEPS(unique_name target_deps ...) +# ADD_CUSTOM_COMMAND( ${target_deps}) +# +# Custom command cant depend on targets, but can depend on executables, +# and executables can depend on targets. 
So this is the process: +######################################################################## +function(GR_GEN_TARGET_DEPS name var) + file( + WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in + "int main(void){return 0;}\n" + ) + execute_process( + COMMAND ${CMAKE_COMMAND} -E copy_if_different + ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in + ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp + ) + add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp) + if(ARGN) + add_dependencies(${name} ${ARGN}) + endif(ARGN) + + if(CMAKE_CROSSCOMPILING) + set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross + else() + set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE) + endif() +endfunction(GR_GEN_TARGET_DEPS) + +######################################################################## +# Control use of gr_logger +# Usage: +# GR_LOGGING() +# +# Will set ENABLE_GR_LOG to 1 by default. +# Can manually set with -DENABLE_GR_LOG=0|1 +######################################################################## +function(GR_LOGGING) + find_package(Log4cpp) + + OPTION(ENABLE_GR_LOG "Use gr_logger" ON) + if(ENABLE_GR_LOG) + # If gr_logger is enabled, make it usable + add_definitions( -DENABLE_GR_LOG ) + + # also test LOG4CPP; if we have it, use this version of the logger + # otherwise, default to the stdout/stderr model. + if(LOG4CPP_FOUND) + SET(HAVE_LOG4CPP True CACHE INTERNAL "" FORCE) + add_definitions( -DHAVE_LOG4CPP ) + else(not LOG4CPP_FOUND) + SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE) + SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE) + SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE) + SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE) + endif(LOG4CPP_FOUND) + + SET(ENABLE_GR_LOG ${ENABLE_GR_LOG} CACHE INTERNAL "" FORCE) + + else(ENABLE_GR_LOG) + SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE) + SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE) + SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE) + SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE) + endif(ENABLE_GR_LOG) + + message(STATUS "ENABLE_GR_LOG set to ${ENABLE_GR_LOG}.") + message(STATUS "HAVE_LOG4CPP set to ${HAVE_LOG4CPP}.") + message(STATUS "LOG4CPP_LIBRARIES set to ${LOG4CPP_LIBRARIES}.") + +endfunction(GR_LOGGING) + +######################################################################## +# Run GRCC to compile .grc files into .py files. +# +# Usage: GRCC(filename, directory) +# - filenames: List of file name of .grc file +# - directory: directory of built .py file - usually in +# ${CMAKE_CURRENT_BINARY_DIR} +# - Sets PYFILES: output converted GRC file names to Python files. +######################################################################## +function(GRCC) + # Extract directory from list of args, remove it for the list of filenames. + list(GET ARGV -1 directory) + list(REMOVE_AT ARGV -1) + set(filenames ${ARGV}) + file(MAKE_DIRECTORY ${directory}) + + SET(GRCC_COMMAND ${CMAKE_SOURCE_DIR}/gr-utils/python/grcc) + + # GRCC uses some stuff in grc and gnuradio-runtime, so we force + # the known paths here + list(APPEND PYTHONPATHS + ${CMAKE_SOURCE_DIR} + ${CMAKE_SOURCE_DIR}/gnuradio-runtime/python + ${CMAKE_SOURCE_DIR}/gnuradio-runtime/lib/swig + ${CMAKE_BINARY_DIR}/gnuradio-runtime/lib/swig + ) + + if(WIN32) + #SWIG generates the python library files into a subdirectory. + #Therefore, we must append this subdirectory into PYTHONPATH. 
+ #Only do this for the python directories matching the following: + foreach(pydir ${PYTHONPATHS}) + get_filename_component(name ${pydir} NAME) + if(name MATCHES "^(swig|lib|src)$") + list(APPEND PYTHONPATHS ${pydir}/${CMAKE_BUILD_TYPE}) + endif() + endforeach(pydir) + endif(WIN32) + + file(TO_NATIVE_PATH "${PYTHONPATHS}" pypath) + + if(UNIX) + list(APPEND pypath "$PYTHONPATH") + string(REPLACE ";" ":" pypath "${pypath}") + set(ENV{PYTHONPATH} ${pypath}) + endif(UNIX) + + if(WIN32) + list(APPEND pypath "%PYTHONPATH%") + string(REPLACE ";" "\\;" pypath "${pypath}") + #list(APPEND environs "PYTHONPATH=${pypath}") + set(ENV{PYTHONPATH} ${pypath}) + endif(WIN32) + + foreach(f ${filenames}) + execute_process( + COMMAND ${GRCC_COMMAND} -d ${directory} ${f} + ) + string(REPLACE ".grc" ".py" pyfile "${f}") + string(REPLACE "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" pyfile "${pyfile}") + list(APPEND pyfiles ${pyfile}) + endforeach(f) + + set(PYFILES ${pyfiles} PARENT_SCOPE) +endfunction(GRCC) + +######################################################################## +# Check if HAVE_PTHREAD_SETSCHEDPARAM and HAVE_SCHED_SETSCHEDULER +# should be defined +######################################################################## +macro(GR_CHECK_LINUX_SCHED_AVAIL) +set(CMAKE_REQUIRED_LIBRARIES -lpthread) + CHECK_CXX_SOURCE_COMPILES(" + #include + int main(){ + pthread_t pthread; + pthread_setschedparam(pthread, 0, 0); + return 0; + } " HAVE_PTHREAD_SETSCHEDPARAM + ) + GR_ADD_COND_DEF(HAVE_PTHREAD_SETSCHEDPARAM) + + CHECK_CXX_SOURCE_COMPILES(" + #include + int main(){ + pid_t pid; + sched_setscheduler(pid, 0, 0); + return 0; + } " HAVE_SCHED_SETSCHEDULER + ) + GR_ADD_COND_DEF(HAVE_SCHED_SETSCHEDULER) +endmacro(GR_CHECK_LINUX_SCHED_AVAIL) + +######################################################################## +# Macros to generate source and header files from template +######################################################################## +macro(GR_EXPAND_X_H component root) + + include(GrPython) + + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py +"#!${PYTHON_EXECUTABLE} + +import sys, os, re +sys.path.append('${GR_RUNTIME_PYTHONPATH}') +os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' +os.chdir('${CMAKE_CURRENT_BINARY_DIR}') + +if __name__ == '__main__': + import build_utils + root, inp = sys.argv[1:3] + for sig in sys.argv[3:]: + name = re.sub ('X+', sig, root) + d = build_utils.standard_dict2(name, sig, '${component}') + build_utils.expand_template(d, inp) +") + + #make a list of all the generated headers + unset(expanded_files_h) + foreach(sig ${ARGN}) + string(REGEX REPLACE "X+" ${sig} name ${root}) + list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h) + endforeach(sig) + unset(name) + + #create a command to generate the headers + add_custom_command( + OUTPUT ${expanded_files_h} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t + COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} + ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py + ${root} ${root}.h.t ${ARGN} + ) + + #install rules for the generated headers + list(APPEND generated_includes ${expanded_files_h}) + +endmacro(GR_EXPAND_X_H) + +macro(GR_EXPAND_X_CC_H component root) + + include(GrPython) + + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py +"#!${PYTHON_EXECUTABLE} + +import sys, os, re +sys.path.append('${GR_RUNTIME_PYTHONPATH}') +os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' +os.chdir('${CMAKE_CURRENT_BINARY_DIR}') + +if __name__ == '__main__': + import build_utils + root, 
inp = sys.argv[1:3] + for sig in sys.argv[3:]: + name = re.sub ('X+', sig, root) + d = build_utils.standard_impl_dict2(name, sig, '${component}') + build_utils.expand_template(d, inp) +") + + #make a list of all the generated files + unset(expanded_files_cc) + unset(expanded_files_h) + foreach(sig ${ARGN}) + string(REGEX REPLACE "X+" ${sig} name ${root}) + list(APPEND expanded_files_cc ${CMAKE_CURRENT_BINARY_DIR}/${name}.cc) + list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h) + endforeach(sig) + unset(name) + + #create a command to generate the source files + add_custom_command( + OUTPUT ${expanded_files_cc} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.cc.t + COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} + ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py + ${root} ${root}.cc.t ${ARGN} + ) + + #create a command to generate the header files + add_custom_command( + OUTPUT ${expanded_files_h} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t + COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} + ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py + ${root} ${root}.h.t ${ARGN} + ) + + #make source files depends on headers to force generation + set_source_files_properties(${expanded_files_cc} + PROPERTIES OBJECT_DEPENDS "${expanded_files_h}" + ) + + #install rules for the generated files + list(APPEND generated_sources ${expanded_files_cc}) + list(APPEND generated_headers ${expanded_files_h}) + +endmacro(GR_EXPAND_X_CC_H) + +macro(GR_EXPAND_X_CC_H_IMPL component root) + + include(GrPython) + + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py +"#!${PYTHON_EXECUTABLE} + +import sys, os, re +sys.path.append('${GR_RUNTIME_PYTHONPATH}') +os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' +os.chdir('${CMAKE_CURRENT_BINARY_DIR}') + +if __name__ == '__main__': + import build_utils + root, inp = sys.argv[1:3] + for sig in sys.argv[3:]: + name = re.sub ('X+', sig, root) + d = build_utils.standard_dict(name, sig, '${component}') + build_utils.expand_template(d, inp, '_impl') +") + + #make a list of all the generated files + unset(expanded_files_cc_impl) + unset(expanded_files_h_impl) + unset(expanded_files_h) + foreach(sig ${ARGN}) + string(REGEX REPLACE "X+" ${sig} name ${root}) + list(APPEND expanded_files_cc_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.cc) + list(APPEND expanded_files_h_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.h) + list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/../include/gnuradio/${component}/${name}.h) + endforeach(sig) + unset(name) + + #create a command to generate the _impl.cc files + add_custom_command( + OUTPUT ${expanded_files_cc_impl} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.cc.t + COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} + ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py + ${root} ${root}_impl.cc.t ${ARGN} + ) + + #create a command to generate the _impl.h files + add_custom_command( + OUTPUT ${expanded_files_h_impl} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.h.t + COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} + ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py + ${root} ${root}_impl.h.t ${ARGN} + ) + + #make _impl.cc source files depend on _impl.h to force generation + set_source_files_properties(${expanded_files_cc_impl} + PROPERTIES OBJECT_DEPENDS "${expanded_files_h_impl}" + ) + + #make _impl.h source files depend on headers to force generation + set_source_files_properties(${expanded_files_h_impl} + PROPERTIES OBJECT_DEPENDS "${expanded_files_h}" + ) + + #install rules for the generated files + list(APPEND 
generated_sources ${expanded_files_cc_impl}) + list(APPEND generated_headers ${expanded_files_h_impl}) + +endmacro(GR_EXPAND_X_CC_H_IMPL) diff --git a/cmake/Modules/GrPlatform.cmake b/cmake/Modules/GrPlatform.cmake new file mode 100644 index 0000000..1139047 --- /dev/null +++ b/cmake/Modules/GrPlatform.cmake @@ -0,0 +1,50 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE) + return() +endif() +set(__INCLUDED_GR_PLATFORM_CMAKE TRUE) + +######################################################################## +# Setup additional defines for OS types +######################################################################## +if(CMAKE_SYSTEM_NAME STREQUAL "Linux") + set(LINUX TRUE) +endif() + +if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/debian_version") + set(DEBIAN TRUE) +endif() + +if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/redhat-release") + set(REDHAT TRUE) +endif() + +if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/slackware-version") + set(SLACKWARE TRUE) +endif() + +######################################################################## +# when the library suffix should be 64 (applies to redhat linux family) +######################################################################## +if (REDHAT OR SLACKWARE) + set(LIB64_CONVENTION TRUE) +endif() + +if(NOT DEFINED LIB_SUFFIX AND LIB64_CONVENTION AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$") + set(LIB_SUFFIX 64) +endif() +set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix") diff --git a/cmake/Modules/GrPython.cmake b/cmake/Modules/GrPython.cmake new file mode 100644 index 0000000..ca6c34e --- /dev/null +++ b/cmake/Modules/GrPython.cmake @@ -0,0 +1,237 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +if(DEFINED __INCLUDED_GR_PYTHON_CMAKE) + return() +endif() +set(__INCLUDED_GR_PYTHON_CMAKE TRUE) + +######################################################################## +# Setup the python interpreter: +# This allows the user to specify a specific interpreter, +# or finds the interpreter via the built-in cmake module. 
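+#
+# For example, a specific interpreter can be forced at configure time
+# (illustrative path):
+#   cmake -DPYTHON_EXECUTABLE=/usr/bin/python2.7 ../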
+######################################################################## +#this allows the user to override PYTHON_EXECUTABLE +if(PYTHON_EXECUTABLE) + + set(PYTHONINTERP_FOUND TRUE) + +#otherwise if not set, try to automatically find it +else(PYTHON_EXECUTABLE) + + #use the built-in find script + find_package(PythonInterp 2) + + #and if that fails use the find program routine + if(NOT PYTHONINTERP_FOUND) + find_program(PYTHON_EXECUTABLE NAMES python python2 python2.7 python2.6 python2.5) + if(PYTHON_EXECUTABLE) + set(PYTHONINTERP_FOUND TRUE) + endif(PYTHON_EXECUTABLE) + endif(NOT PYTHONINTERP_FOUND) + +endif(PYTHON_EXECUTABLE) + +if (CMAKE_CROSSCOMPILING) + set(QA_PYTHON_EXECUTABLE "/usr/bin/python") +else (CMAKE_CROSSCOMPILING) + set(QA_PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE}) +endif(CMAKE_CROSSCOMPILING) + +#make the path to the executable appear in the cmake gui +set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter") +set(QA_PYTHON_EXECUTABLE ${QA_PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter for QA tests") + +#make sure we can use -B with python (introduced in 2.6) +if(PYTHON_EXECUTABLE) + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -B -c "" + OUTPUT_QUIET ERROR_QUIET + RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT + ) + if(PYTHON_HAS_DASH_B_RESULT EQUAL 0) + set(PYTHON_DASH_B "-B") + endif() +endif(PYTHON_EXECUTABLE) + +######################################################################## +# Check for the existence of a python module: +# - desc a string description of the check +# - mod the name of the module to import +# - cmd an additional command to run +# - have the result variable to set +######################################################################## +macro(GR_PYTHON_CHECK_MODULE desc mod cmd have) + message(STATUS "") + message(STATUS "Python checking for ${desc}") + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -c " +######################################### +try: + import ${mod} + assert ${cmd} +except ImportError, AssertionError: exit(-1) +except: pass +#########################################" + RESULT_VARIABLE ${have} + ) + if(${have} EQUAL 0) + message(STATUS "Python checking for ${desc} - found") + set(${have} TRUE) + else(${have} EQUAL 0) + message(STATUS "Python checking for ${desc} - not found") + set(${have} FALSE) + endif(${have} EQUAL 0) +endmacro(GR_PYTHON_CHECK_MODULE) + +######################################################################## +# Sets the python installation directory GR_PYTHON_DIR +######################################################################## +if(NOT DEFINED GR_PYTHON_DIR) +execute_process(COMMAND ${PYTHON_EXECUTABLE} -c " +from distutils import sysconfig +print sysconfig.get_python_lib(plat_specific=True, prefix='') +" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE +) +endif() +file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR) + +######################################################################## +# Create an always-built target with a unique name +# Usage: GR_UNIQUE_TARGET( ) +######################################################################## +function(GR_UNIQUE_TARGET desc) + file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib +unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] +print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))" + OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE) + add_custom_target(${_target} ALL DEPENDS ${ARGN}) 
+endfunction(GR_UNIQUE_TARGET) + +######################################################################## +# Install python sources (also builds and installs byte-compiled python) +######################################################################## +function(GR_PYTHON_INSTALL) + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN}) + + #################################################################### + if(GR_PYTHON_INSTALL_FILES) + #################################################################### + install(${ARGN}) #installs regular python files + + #create a list of all generated files + unset(pysrcfiles) + unset(pycfiles) + unset(pyofiles) + foreach(pyfile ${GR_PYTHON_INSTALL_FILES}) + get_filename_component(pyfile ${pyfile} ABSOLUTE) + list(APPEND pysrcfiles ${pyfile}) + + #determine if this file is in the source or binary directory + file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile}) + string(LENGTH "${source_rel_path}" source_rel_path_len) + file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile}) + string(LENGTH "${binary_rel_path}" binary_rel_path_len) + + #and set the generated path appropriately + if(${source_rel_path_len} GREATER ${binary_rel_path_len}) + set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path}) + else() + set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path}) + endif() + list(APPEND pycfiles ${pygenfile}c) + list(APPEND pyofiles ${pygenfile}o) + + #ensure generation path exists + get_filename_component(pygen_path ${pygenfile} PATH) + file(MAKE_DIRECTORY ${pygen_path}) + + endforeach(pyfile) + + #the command to generate the pyc files + add_custom_command( + DEPENDS ${pysrcfiles} OUTPUT ${pycfiles} + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles} + ) + + #the command to generate the pyo files + add_custom_command( + DEPENDS ${pysrcfiles} OUTPUT ${pyofiles} + COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles} + ) + + #create install rule and add generated files to target list + set(python_install_gen_targets ${pycfiles} ${pyofiles}) + install(FILES ${python_install_gen_targets} + DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} + COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} + ) + + #################################################################### + elseif(GR_PYTHON_INSTALL_PROGRAMS) + #################################################################### + file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native) + + if (CMAKE_CROSSCOMPILING) + set(pyexe_native "/usr/bin/env python") + endif() + + foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS}) + get_filename_component(pyfile_name ${pyfile} NAME) + get_filename_component(pyfile ${pyfile} ABSOLUTE) + string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe") + list(APPEND python_install_gen_targets ${pyexefile}) + + get_filename_component(pyexefile_path ${pyexefile} PATH) + file(MAKE_DIRECTORY ${pyexefile_path}) + + add_custom_command( + OUTPUT ${pyexefile} DEPENDS ${pyfile} + COMMAND ${PYTHON_EXECUTABLE} -c + "import re; R=re.compile('^\#!.*$\\n',flags=re.MULTILINE); open('${pyexefile}','w').write('\#!${pyexe_native}\\n'+R.sub('',open('${pyfile}','r').read()))" + COMMENT "Shebangin ${pyfile_name}" + VERBATIM + ) + + #on windows, python files need an extension to execute + get_filename_component(pyfile_ext ${pyfile} EXT) + if(WIN32 AND NOT pyfile_ext) + 
set(pyfile_name "${pyfile_name}.py") + endif() + + install(PROGRAMS ${pyexefile} RENAME ${pyfile_name} + DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} + COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} + ) + endforeach(pyfile) + + endif() + + GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets}) + +endfunction(GR_PYTHON_INSTALL) + +######################################################################## +# Write the python helper script that generates byte code files +######################################################################## +file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py " +import sys, py_compile +files = sys.argv[1:] +srcs, gens = files[:len(files)/2], files[len(files)/2:] +for src, gen in zip(srcs, gens): + py_compile.compile(file=src, cfile=gen, doraise=True) +") diff --git a/cmake/Modules/GrSwig.cmake b/cmake/Modules/GrSwig.cmake new file mode 100644 index 0000000..59720ca --- /dev/null +++ b/cmake/Modules/GrSwig.cmake @@ -0,0 +1,247 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +if(DEFINED __INCLUDED_GR_SWIG_CMAKE) + return() +endif() +set(__INCLUDED_GR_SWIG_CMAKE TRUE) + +include(GrPython) + +######################################################################## +# Builds a swig documentation file to be generated into python docstrings +# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....) 
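+# For example (illustrative only; the output file and include directory
+# below are placeholders for whatever the calling module uses):
+#   GR_SWIG_MAKE_DOCS(${CMAKE_CURRENT_BINARY_DIR}/nordic_swig_doc.i
+#                     ${CMAKE_SOURCE_DIR}/include)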
+# +# Set the following variable to specify extra dependent targets: +# - GR_SWIG_DOCS_SOURCE_DEPS +# - GR_SWIG_DOCS_TARGET_DEPS +######################################################################## +function(GR_SWIG_MAKE_DOCS output_file) + if(ENABLE_DOXYGEN) + + #setup the input files variable list, quote formated + set(input_files) + unset(INPUT_PATHS) + foreach(input_path ${ARGN}) + if(IS_DIRECTORY ${input_path}) #when input path is a directory + file(GLOB input_path_h_files ${input_path}/*.h) + else() #otherwise its just a file, no glob + set(input_path_h_files ${input_path}) + endif() + list(APPEND input_files ${input_path_h_files}) + set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"") + endforeach(input_path) + + #determine the output directory + get_filename_component(name ${output_file} NAME_WE) + get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH) + set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs) + make_directory(${OUTPUT_DIRECTORY}) + + #generate the Doxyfile used by doxygen + configure_file( + ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in + ${OUTPUT_DIRECTORY}/Doxyfile + @ONLY) + + #Create a dummy custom command that depends on other targets + include(GrMiscUtils) + GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS}) + + #call doxygen on the Doxyfile + input headers + add_custom_command( + OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml + DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps} + COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile + COMMENT "Generating doxygen xml for ${name} docs" + ) + + #call the swig_doc script on the xml files + add_custom_command( + OUTPUT ${output_file} + DEPENDS ${input_files} ${stamp-file} ${OUTPUT_DIRECTORY}/xml/index.xml + COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} + ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py + ${OUTPUT_DIRECTORY}/xml + ${output_file} + COMMENT "Generating python docstrings for ${name}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen + ) + + else(ENABLE_DOXYGEN) + file(WRITE ${output_file} "\n") #no doxygen -> empty file + endif(ENABLE_DOXYGEN) +endfunction(GR_SWIG_MAKE_DOCS) + +######################################################################## +# Build a swig target for the common gnuradio use case. Usage: +# GR_SWIG_MAKE(target ifile ifile ifile...) +# +# Set the following variables before calling: +# - GR_SWIG_FLAGS +# - GR_SWIG_INCLUDE_DIRS +# - GR_SWIG_LIBRARIES +# - GR_SWIG_SOURCE_DEPS +# - GR_SWIG_TARGET_DEPS +# - GR_SWIG_DOC_FILE +# - GR_SWIG_DOC_DIRS +######################################################################## +macro(GR_SWIG_MAKE name) + set(ifiles ${ARGN}) + + # Shimming this in here to take care of a SWIG bug with handling + # vector and vector (on 32-bit machines) and + # vector (on 64-bit machines). Use this to test + # the size of size_t, then set SIZE_T_32 if it's a 32-bit machine + # or not if it's 64-bit. The logic in gr_type.i handles the rest. 
+ INCLUDE(CheckTypeSize) + CHECK_TYPE_SIZE("size_t" SIZEOF_SIZE_T) + CHECK_TYPE_SIZE("unsigned int" SIZEOF_UINT) + if(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT}) + list(APPEND GR_SWIG_FLAGS -DSIZE_T_32) + endif(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT}) + + #do swig doc generation if specified + if(GR_SWIG_DOC_FILE) + set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS}) + list(APPEND GR_SWIG_DOCS_TARGET_DEPS ${GR_SWIG_TARGET_DEPS}) + GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS}) + add_custom_target(${name}_swig_doc DEPENDS ${GR_SWIG_DOC_FILE}) + list(APPEND GR_SWIG_TARGET_DEPS ${name}_swig_doc ${GR_RUNTIME_SWIG_DOC_FILE}) + endif() + + #append additional include directories + find_package(PythonLibs 2) + list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs) + list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS}) + + #prepend local swig directories + list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_SOURCE_DIR}) + list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_BINARY_DIR}) + + #determine include dependencies for swig file + execute_process( + COMMAND ${PYTHON_EXECUTABLE} + ${CMAKE_BINARY_DIR}/get_swig_deps.py + "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}" + OUTPUT_STRIP_TRAILING_WHITESPACE + OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + ) + + #Create a dummy custom command that depends on other targets + include(GrMiscUtils) + GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS}) + set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag) + add_custom_command( + OUTPUT ${tag_file} + DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps} + COMMAND ${CMAKE_COMMAND} -E touch ${tag_file} + ) + + #append the specified include directories + include_directories(${GR_SWIG_INCLUDE_DIRS}) + list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file}) + + #setup the swig flags with flags and include directories + set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS}) + foreach(dir ${GR_SWIG_INCLUDE_DIRS}) + list(APPEND CMAKE_SWIG_FLAGS "-I${dir}") + endforeach(dir) + + #set the C++ property on the swig .i file so it builds + set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON) + + #setup the actual swig library target to be built + include(UseSWIG) + SWIG_ADD_MODULE(${name} python ${ifiles}) + SWIG_LINK_LIBRARIES(${name} ${PYTHON_LIBRARIES} ${GR_SWIG_LIBRARIES}) + if(${name} STREQUAL "runtime_swig") + SET_TARGET_PROPERTIES(${SWIG_MODULE_runtime_swig_REAL_NAME} PROPERTIES DEFINE_SYMBOL "gnuradio_runtime_EXPORTS") + endif(${name} STREQUAL "runtime_swig") + +endmacro(GR_SWIG_MAKE) + +######################################################################## +# Install swig targets generated by GR_SWIG_MAKE. Usage: +# GR_SWIG_INSTALL( +# TARGETS target target target... 
+# [DESTINATION destination] +# [COMPONENT component] +# ) +######################################################################## +macro(GR_SWIG_INSTALL) + + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN}) + + foreach(name ${GR_SWIG_INSTALL_TARGETS}) + install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME} + DESTINATION ${GR_SWIG_INSTALL_DESTINATION} + COMPONENT ${GR_SWIG_INSTALL_COMPONENT} + ) + + include(GrPython) + GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py + DESTINATION ${GR_SWIG_INSTALL_DESTINATION} + COMPONENT ${GR_SWIG_INSTALL_COMPONENT} + ) + + GR_LIBTOOL( + TARGET ${SWIG_MODULE_${name}_REAL_NAME} + DESTINATION ${GR_SWIG_INSTALL_DESTINATION} + ) + + endforeach(name) + +endmacro(GR_SWIG_INSTALL) + +######################################################################## +# Generate a python file that can determine swig dependencies. +# Used by the make macro above to determine extra dependencies. +# When you build C++, CMake figures out the header dependencies. +# This code essentially performs that logic for swig includes. +######################################################################## +file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py " + +import os, sys, re + +i_include_matcher = re.compile('%(include|import)\\s*[<|\"](.*)[>|\"]') +h_include_matcher = re.compile('#(include)\\s*[<|\"](.*)[>|\"]') +include_dirs = sys.argv[2].split(';') + +def get_swig_incs(file_path): + if file_path.endswith('.i'): matcher = i_include_matcher + else: matcher = h_include_matcher + file_contents = open(file_path, 'r').read() + return matcher.findall(file_contents, re.MULTILINE) + +def get_swig_deps(file_path, level): + deps = [file_path] + if level == 0: return deps + for keyword, inc_file in get_swig_incs(file_path): + for inc_dir in include_dirs: + inc_path = os.path.join(inc_dir, inc_file) + if not os.path.exists(inc_path): continue + deps.extend(get_swig_deps(inc_path, level-1)) + break #found, we dont search in lower prio inc dirs + return deps + +if __name__ == '__main__': + ifiles = sys.argv[1].split(';') + deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], []) + #sys.stderr.write(';'.join(set(deps)) + '\\n\\n') + print(';'.join(set(deps))) +") diff --git a/cmake/Modules/GrTest.cmake b/cmake/Modules/GrTest.cmake new file mode 100644 index 0000000..8660715 --- /dev/null +++ b/cmake/Modules/GrTest.cmake @@ -0,0 +1,139 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +if(DEFINED __INCLUDED_GR_TEST_CMAKE) + return() +endif() +set(__INCLUDED_GR_TEST_CMAKE TRUE) + +######################################################################## +# Add a unit test and setup the environment for a unit test. +# Takes the same arguments as the ADD_TEST function. 
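+# A python QA test would typically be added as, e.g. (illustrative only;
+# the test name and script path are placeholders):
+#   GR_ADD_TEST(qa_nordic_rx ${QA_PYTHON_EXECUTABLE}
+#               ${CMAKE_CURRENT_SOURCE_DIR}/qa_nordic_rx.py)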
+# +# Before calling set the following variables: +# GR_TEST_TARGET_DEPS - built targets for the library path +# GR_TEST_LIBRARY_DIRS - directories for the library path +# GR_TEST_PYTHON_DIRS - directories for the python path +# GR_TEST_ENVIRONS - other environment key/value pairs +######################################################################## +function(GR_ADD_TEST test_name) + + #Ensure that the build exe also appears in the PATH. + list(APPEND GR_TEST_TARGET_DEPS ${ARGN}) + + #In the land of windows, all libraries must be in the PATH. + #Since the dependent libraries are not yet installed, + #we must manually set them in the PATH to run tests. + #The following appends the path of a target dependency. + foreach(target ${GR_TEST_TARGET_DEPS}) + get_target_property(location ${target} LOCATION) + if(location) + get_filename_component(path ${location} PATH) + string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path}) + list(APPEND GR_TEST_LIBRARY_DIRS ${path}) + endif(location) + endforeach(target) + + if(WIN32) + #SWIG generates the python library files into a subdirectory. + #Therefore, we must append this subdirectory into PYTHONPATH. + #Only do this for the python directories matching the following: + foreach(pydir ${GR_TEST_PYTHON_DIRS}) + get_filename_component(name ${pydir} NAME) + if(name MATCHES "^(swig|lib|src)$") + list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE}) + endif() + endforeach(pydir) + endif(WIN32) + + file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir) + file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list? + file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list? + + set(environs "VOLK_GENERIC=1" "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}") + list(APPEND environs ${GR_TEST_ENVIRONS}) + + #http://www.cmake.org/pipermail/cmake/2009-May/029464.html + #Replaced this add test + set environs code with the shell script generation. + #Its nicer to be able to manually run the shell script to diagnose problems. 
+ #ADD_TEST(${ARGV}) + #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}") + + if(UNIX) + set(LD_PATH_VAR "LD_LIBRARY_PATH") + if(APPLE) + set(LD_PATH_VAR "DYLD_LIBRARY_PATH") + endif() + + set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH") + list(APPEND libpath "$${LD_PATH_VAR}") + list(APPEND pypath "$PYTHONPATH") + + #replace list separator with the path separator + string(REPLACE ";" ":" libpath "${libpath}") + string(REPLACE ";" ":" pypath "${pypath}") + list(APPEND environs "PATH=${binpath}" "${LD_PATH_VAR}=${libpath}" "PYTHONPATH=${pypath}") + + #generate a bat file that sets the environment and runs the test + if (CMAKE_CROSSCOMPILING) + set(SHELL "/bin/sh") + else(CMAKE_CROSSCOMPILING) + find_program(SHELL sh) + endif(CMAKE_CROSSCOMPILING) + set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh) + file(WRITE ${sh_file} "#!${SHELL}\n") + #each line sets an environment variable + foreach(environ ${environs}) + file(APPEND ${sh_file} "export ${environ}\n") + endforeach(environ) + #load the command to run with its arguments + foreach(arg ${ARGN}) + file(APPEND ${sh_file} "${arg} ") + endforeach(arg) + file(APPEND ${sh_file} "\n") + + #make the shell file executable + execute_process(COMMAND chmod +x ${sh_file}) + + add_test(${test_name} ${SHELL} ${sh_file}) + + endif(UNIX) + + if(WIN32) + list(APPEND libpath ${DLL_PATHS} "%PATH%") + list(APPEND pypath "%PYTHONPATH%") + + #replace list separator with the path separator (escaped) + string(REPLACE ";" "\\;" libpath "${libpath}") + string(REPLACE ";" "\\;" pypath "${pypath}") + list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}") + + #generate a bat file that sets the environment and runs the test + set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat) + file(WRITE ${bat_file} "@echo off\n") + #each line sets an environment variable + foreach(environ ${environs}) + file(APPEND ${bat_file} "SET ${environ}\n") + endforeach(environ) + #load the command to run with its arguments + foreach(arg ${ARGN}) + file(APPEND ${bat_file} "${arg} ") + endforeach(arg) + file(APPEND ${bat_file} "\n") + + add_test(${test_name} ${bat_file}) + endif(WIN32) + +endfunction(GR_ADD_TEST) diff --git a/cmake/Modules/UseSWIG.cmake b/cmake/Modules/UseSWIG.cmake new file mode 100644 index 0000000..c0f1728 --- /dev/null +++ b/cmake/Modules/UseSWIG.cmake @@ -0,0 +1,304 @@ +# - SWIG module for CMake +# Defines the following macros: +# SWIG_ADD_MODULE(name language [ files ]) +# - Define swig module with given name and specified language +# SWIG_LINK_LIBRARIES(name [ libraries ]) +# - Link libraries to swig module +# All other macros are for internal use only. +# To get the actual name of the swig module, +# use: ${SWIG_MODULE_${name}_REAL_NAME}. +# Set Source files properties such as CPLUSPLUS and SWIG_FLAGS to specify +# special behavior of SWIG. Also global CMAKE_SWIG_FLAGS can be used to add +# special flags to all swig calls. +# Another special variable is CMAKE_SWIG_OUTDIR, it allows one to specify +# where to write all the swig generated module (swig -outdir option) +# The name-specific variable SWIG_MODULE__EXTRA_DEPS may be used +# to specify extra dependencies for the generated modules. +# If the source file generated by swig need some special flag you can use +# set_source_files_properties( ${swig_generated_file_fullname} +# PROPERTIES COMPILE_FLAGS "-bla") + + +#============================================================================= +# Copyright 2004-2009 Kitware, Inc. 
+# Copyright 2009 Mathieu Malaterre +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file Copyright.txt for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. +#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + +set(SWIG_CXX_EXTENSION "cxx") +set(SWIG_EXTRA_LIBRARIES "") + +set(SWIG_PYTHON_EXTRA_FILE_EXTENSION "py") + +# +# For given swig module initialize variables associated with it +# +macro(SWIG_MODULE_INITIALIZE name language) + string(TOUPPER "${language}" swig_uppercase_language) + string(TOLOWER "${language}" swig_lowercase_language) + set(SWIG_MODULE_${name}_LANGUAGE "${swig_uppercase_language}") + set(SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG "${swig_lowercase_language}") + + set(SWIG_MODULE_${name}_REAL_NAME "${name}") + if("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "UNKNOWN") + message(FATAL_ERROR "SWIG Error: Language \"${language}\" not found") + elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PYTHON") + # when swig is used without the -interface it will produce in the module.py + # a 'import _modulename' statement, which implies having a corresponding + # _modulename.so (*NIX), _modulename.pyd (Win32). + set(SWIG_MODULE_${name}_REAL_NAME "_${name}") + elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PERL") + set(SWIG_MODULE_${name}_EXTRA_FLAGS "-shadow") + endif() +endmacro() + +# +# For a given language, input file, and output file, determine extra files that +# will be generated. This is internal swig macro. 
+# + +macro(SWIG_GET_EXTRA_OUTPUT_FILES language outfiles generatedpath infile) + set(${outfiles} "") + get_source_file_property(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename + ${infile} SWIG_MODULE_NAME) + if(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename STREQUAL "NOTFOUND") + get_filename_component(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename "${infile}" NAME_WE) + endif() + foreach(it ${SWIG_${language}_EXTRA_FILE_EXTENSION}) + set(${outfiles} ${${outfiles}} + "${generatedpath}/${SWIG_GET_EXTRA_OUTPUT_FILES_module_basename}.${it}") + endforeach() +endmacro() + +# +# Take swig (*.i) file and add proper custom commands for it +# +macro(SWIG_ADD_SOURCE_TO_MODULE name outfiles infile) + set(swig_full_infile ${infile}) + get_filename_component(swig_source_file_path "${infile}" PATH) + get_filename_component(swig_source_file_name_we "${infile}" NAME_WE) + get_source_file_property(swig_source_file_generated ${infile} GENERATED) + get_source_file_property(swig_source_file_cplusplus ${infile} CPLUSPLUS) + get_source_file_property(swig_source_file_flags ${infile} SWIG_FLAGS) + if("${swig_source_file_flags}" STREQUAL "NOTFOUND") + set(swig_source_file_flags "") + endif() + set(swig_source_file_fullname "${infile}") + if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_SOURCE_DIR}") + string(REGEX REPLACE + "^${CMAKE_CURRENT_SOURCE_DIR}" "" + swig_source_file_relative_path + "${swig_source_file_path}") + else() + if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_BINARY_DIR}") + string(REGEX REPLACE + "^${CMAKE_CURRENT_BINARY_DIR}" "" + swig_source_file_relative_path + "${swig_source_file_path}") + set(swig_source_file_generated 1) + else() + set(swig_source_file_relative_path "${swig_source_file_path}") + if(swig_source_file_generated) + set(swig_source_file_fullname "${CMAKE_CURRENT_BINARY_DIR}/${infile}") + else() + set(swig_source_file_fullname "${CMAKE_CURRENT_SOURCE_DIR}/${infile}") + endif() + endif() + endif() + + set(swig_generated_file_fullname + "${CMAKE_CURRENT_BINARY_DIR}") + if(swig_source_file_relative_path) + set(swig_generated_file_fullname + "${swig_generated_file_fullname}/${swig_source_file_relative_path}") + endif() + # If CMAKE_SWIG_OUTDIR was specified then pass it to -outdir + if(CMAKE_SWIG_OUTDIR) + set(swig_outdir ${CMAKE_SWIG_OUTDIR}) + else() + set(swig_outdir ${CMAKE_CURRENT_BINARY_DIR}) + endif() + SWIG_GET_EXTRA_OUTPUT_FILES(${SWIG_MODULE_${name}_LANGUAGE} + swig_extra_generated_files + "${swig_outdir}" + "${infile}") + set(swig_generated_file_fullname + "${swig_generated_file_fullname}/${swig_source_file_name_we}") + # add the language into the name of the file (i.e. 
TCL_wrap) + # this allows for the same .i file to be wrapped into different languages + set(swig_generated_file_fullname + "${swig_generated_file_fullname}${SWIG_MODULE_${name}_LANGUAGE}_wrap") + + if(swig_source_file_cplusplus) + set(swig_generated_file_fullname + "${swig_generated_file_fullname}.${SWIG_CXX_EXTENSION}") + else() + set(swig_generated_file_fullname + "${swig_generated_file_fullname}.c") + endif() + + # Shut up some warnings from poor SWIG code generation that we + # can do nothing about, when this flag is available + include(CheckCXXCompilerFlag) + check_cxx_compiler_flag("-Wno-unused-but-set-variable" HAVE_WNO_UNUSED_BUT_SET_VARIABLE) + if(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) + set_source_files_properties(${swig_generated_file_fullname} + PROPERTIES COMPILE_FLAGS "-Wno-unused-but-set-variable") + endif(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) + + get_directory_property(cmake_include_directories INCLUDE_DIRECTORIES) + set(swig_include_dirs) + foreach(it ${cmake_include_directories}) + set(swig_include_dirs ${swig_include_dirs} "-I${it}") + endforeach() + + set(swig_special_flags) + # default is c, so add c++ flag if it is c++ + if(swig_source_file_cplusplus) + set(swig_special_flags ${swig_special_flags} "-c++") + endif() + set(swig_extra_flags) + if(SWIG_MODULE_${name}_EXTRA_FLAGS) + set(swig_extra_flags ${swig_extra_flags} ${SWIG_MODULE_${name}_EXTRA_FLAGS}) + endif() + + # hack to work around CMake bug in add_custom_command with multiple OUTPUT files + + file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib +unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] +print(re.sub('\\W', '_', '${name} ${reldir} ' + unique))" + OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE + ) + + file( + WRITE ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in + "int main(void){return 0;}\n" + ) + + # create dummy dependencies + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp + COMMAND ${CMAKE_COMMAND} -E copy + ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in + ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp + DEPENDS "${swig_source_file_fullname}" ${SWIG_MODULE_${name}_EXTRA_DEPS} + COMMENT "" + ) + + # create the dummy target + add_executable(${_target} ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp) + + # add a custom command to the dummy target + add_custom_command( + TARGET ${_target} + # Let's create the ${swig_outdir} at execution time, in case dir contains $(OutDir) + COMMAND ${CMAKE_COMMAND} -E make_directory ${swig_outdir} + COMMAND "${SWIG_EXECUTABLE}" + ARGS "-${SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG}" + ${swig_source_file_flags} + ${CMAKE_SWIG_FLAGS} + -outdir ${swig_outdir} + ${swig_special_flags} + ${swig_extra_flags} + ${swig_include_dirs} + -o "${swig_generated_file_fullname}" + "${swig_source_file_fullname}" + COMMENT "Swig source" + ) + + #add dummy independent dependencies from the _target to each file + #that will be generated by the SWIG command above + + set(${outfiles} "${swig_generated_file_fullname}" ${swig_extra_generated_files}) + + foreach(swig_gen_file ${${outfiles}}) + add_custom_command( + OUTPUT ${swig_gen_file} + COMMAND "" + DEPENDS ${_target} + COMMENT "" + ) + endforeach() + + set_source_files_properties( + ${outfiles} PROPERTIES GENERATED 1 + ) + +endmacro() + +# +# Create Swig module +# +macro(SWIG_ADD_MODULE name language) + SWIG_MODULE_INITIALIZE(${name} ${language}) + set(swig_dot_i_sources) + set(swig_other_sources) + foreach(it ${ARGN}) + 
if(${it} MATCHES ".*\\.i$") + set(swig_dot_i_sources ${swig_dot_i_sources} "${it}") + else() + set(swig_other_sources ${swig_other_sources} "${it}") + endif() + endforeach() + + set(swig_generated_sources) + foreach(it ${swig_dot_i_sources}) + SWIG_ADD_SOURCE_TO_MODULE(${name} swig_generated_source ${it}) + set(swig_generated_sources ${swig_generated_sources} "${swig_generated_source}") + endforeach() + get_directory_property(swig_extra_clean_files ADDITIONAL_MAKE_CLEAN_FILES) + set_directory_properties(PROPERTIES + ADDITIONAL_MAKE_CLEAN_FILES "${swig_extra_clean_files};${swig_generated_sources}") + add_library(${SWIG_MODULE_${name}_REAL_NAME} + MODULE + ${swig_generated_sources} + ${swig_other_sources}) + string(TOLOWER "${language}" swig_lowercase_language) + if ("${swig_lowercase_language}" STREQUAL "java") + if (APPLE) + # In java you want: + # System.loadLibrary("LIBRARY"); + # then JNI will look for a library whose name is platform dependent, namely + # MacOS : libLIBRARY.jnilib + # Windows: LIBRARY.dll + # Linux : libLIBRARY.so + set_target_properties (${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".jnilib") + endif () + endif () + if ("${swig_lowercase_language}" STREQUAL "python") + # this is only needed for the python case where a _modulename.so is generated + set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES PREFIX "") + # Python extension modules on Windows must have the extension ".pyd" + # instead of ".dll" as of Python 2.5. Older python versions do support + # this suffix. + # http://docs.python.org/whatsnew/ports.html#SECTION0001510000000000000000 + # + # Windows: .dll is no longer supported as a filename extension for extension modules. + # .pyd is now the only filename extension that will be searched for. + # + if(WIN32 AND NOT CYGWIN) + set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".pyd") + endif() + endif () +endmacro() + +# +# Like TARGET_LINK_LIBRARIES but for swig modules +# +macro(SWIG_LINK_LIBRARIES name) + if(SWIG_MODULE_${name}_REAL_NAME) + target_link_libraries(${SWIG_MODULE_${name}_REAL_NAME} ${ARGN}) + else() + message(SEND_ERROR "Cannot find Swig library \"${name}\".") + endif() +endmacro() diff --git a/cmake/Modules/nordicConfig.cmake b/cmake/Modules/nordicConfig.cmake new file mode 100644 index 0000000..366ceeb --- /dev/null +++ b/cmake/Modules/nordicConfig.cmake @@ -0,0 +1,30 @@ +INCLUDE(FindPkgConfig) +PKG_CHECK_MODULES(PC_NORDIC nordic) + +FIND_PATH( + NORDIC_INCLUDE_DIRS + NAMES nordic/api.h + HINTS $ENV{NORDIC_DIR}/include + ${PC_NORDIC_INCLUDEDIR} + PATHS ${CMAKE_INSTALL_PREFIX}/include + /usr/local/include + /usr/include +) + +FIND_LIBRARY( + NORDIC_LIBRARIES + NAMES gnuradio-nordic + HINTS $ENV{NORDIC_DIR}/lib + ${PC_NORDIC_LIBDIR} + PATHS ${CMAKE_INSTALL_PREFIX}/lib + ${CMAKE_INSTALL_PREFIX}/lib64 + /usr/local/lib + /usr/local/lib64 + /usr/lib + /usr/lib64 +) + +INCLUDE(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(NORDIC DEFAULT_MSG NORDIC_LIBRARIES NORDIC_INCLUDE_DIRS) +MARK_AS_ADVANCED(NORDIC_LIBRARIES NORDIC_INCLUDE_DIRS) + diff --git a/cmake/cmake_uninstall.cmake.in b/cmake/cmake_uninstall.cmake.in new file mode 100644 index 0000000..9ae1ae4 --- /dev/null +++ b/cmake/cmake_uninstall.cmake.in @@ -0,0 +1,32 @@ +# http://www.vtk.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F + +IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"") +ENDIF(NOT 
EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + +FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) +STRING(REGEX REPLACE "\n" ";" files "${files}") +FOREACH(file ${files}) + MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") + IF(EXISTS "$ENV{DESTDIR}${file}") + EXEC_PROGRAM( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + IF(NOT "${rm_retval}" STREQUAL 0) + MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") + ENDIF(NOT "${rm_retval}" STREQUAL 0) + ELSEIF(IS_SYMLINK "$ENV{DESTDIR}${file}") + EXEC_PROGRAM( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + IF(NOT "${rm_retval}" STREQUAL 0) + MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") + ENDIF(NOT "${rm_retval}" STREQUAL 0) + ELSE(EXISTS "$ENV{DESTDIR}${file}") + MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") + ENDIF(EXISTS "$ENV{DESTDIR}${file}") +ENDFOREACH(file) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt new file mode 100644 index 0000000..32aedd1 --- /dev/null +++ b/docs/CMakeLists.txt @@ -0,0 +1,31 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +######################################################################## +# Setup dependencies +######################################################################## +find_package(Doxygen) + +######################################################################## +# Begin conditional configuration +######################################################################## +if(ENABLE_DOXYGEN) + +######################################################################## +# Add subdirectories +######################################################################## +add_subdirectory(doxygen) + +endif(ENABLE_DOXYGEN) diff --git a/docs/README.nordic b/docs/README.nordic new file mode 100644 index 0000000..f678e88 --- /dev/null +++ b/docs/README.nordic @@ -0,0 +1,11 @@ +This is the nordic-write-a-block package meant as a guide to building +out-of-tree packages. To use the nordic blocks, the Python namespaces +is in 'nordic', which is imported as: + + import nordic + +See the Doxygen documentation for details about the blocks available +in this package. A quick listing of the details can be found in Python +after importing by using: + + help(nordic) diff --git a/docs/doxygen/CMakeLists.txt b/docs/doxygen/CMakeLists.txt new file mode 100644 index 0000000..107f05e --- /dev/null +++ b/docs/doxygen/CMakeLists.txt @@ -0,0 +1,48 @@ +# Copyright (C) 2016 Bastille Networks +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +######################################################################## +# Create the doxygen configuration file +######################################################################## +file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir) +file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir) +file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir) +file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir) + +set(HAVE_DOT ${DOXYGEN_DOT_FOUND}) +set(enable_html_docs YES) +set(enable_latex_docs NO) +set(enable_xml_docs YES) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in + ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile +@ONLY) + +set(BUILT_DIRS ${CMAKE_CURRENT_BINARY_DIR}/xml ${CMAKE_CURRENT_BINARY_DIR}/html) + +######################################################################## +# Make and install doxygen docs +######################################################################## +add_custom_command( + OUTPUT ${BUILT_DIRS} + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating documentation with doxygen" +) + +add_custom_target(doxygen_target ALL DEPENDS ${BUILT_DIRS}) + +install(DIRECTORY ${BUILT_DIRS} DESTINATION ${GR_PKG_DOC_DIR}) diff --git a/docs/doxygen/Doxyfile.in b/docs/doxygen/Doxyfile.in new file mode 100644 index 0000000..17a451d --- /dev/null +++ b/docs/doxygen/Doxyfile.in @@ -0,0 +1,1922 @@ +# Doxyfile 1.8.4 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed +# in front of the TAG it is preceding . +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or sequence of words) that should +# identify the project. Note that if you do not use Doxywizard you need +# to put quotes around the project name if it contains spaces. + +PROJECT_NAME = "GNU Radio's NORDIC Package" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. 
+ +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, +# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, +# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. 
+ +ALWAYS_DETAILED_SEC = YES + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = NO + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. Note that you specify absolute paths here, but also +# relative paths, which will be relative from the directory where doxygen is +# started. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = YES + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. 
+ +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, +# and language is one of the parsers supported by doxygen: IDL, Java, +# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, +# C++. For instance to make doxygen treat .inc files as Fortran files (default +# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note +# that for custom extensions you also need to set FILE_PATTERNS otherwise the +# files are not read by doxygen. + +EXTENSION_MAPPING = + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. +# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by by putting a % sign in front of the word +# or globally by setting AUTOLINK_SUPPORT to NO. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. 
+# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = YES + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES (the +# default) will make doxygen replace the get and set methods by a property in +# the documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields or simple typedef fields will be shown +# inline in the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO (the default), structs, classes, and unions are shown on a separate +# page (for HTML and Man pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can +# be an expensive process and often the same symbol appear multiple times in +# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too +# small doxygen will become slower. If the cache is too large, memory is wasted. 
+# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid +# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 +# symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. 
+ +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. 
This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = NO + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = NO + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = NO + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= NO + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if section-label ... \endif +# and \cond section-label ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command <command> <input-file>, where <command> is the value of +# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files +# containing the references data. This must be a list of .bib files. The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info.
For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. Do not use +# file names with spaces, bibtex cannot handle them. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text " + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = @top_srcdir@ \ + @top_builddir@ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. 
If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = *.h \ + *.dox + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = @abs_top_builddir@/docs/doxygen/html \ + @abs_top_builddir@/docs/doxygen/xml \ + @abs_top_builddir@/docs/doxygen/other/doxypy.py \ + @abs_top_builddir@/_CPack_Packages \ + @abs_top_srcdir@/cmake + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = */.deps/* \ + */.libs/* \ + */.svn/* \ + */CVS/* \ + */__init__.py \ + */qa_*.cc \ + */qa_*.h \ + */qa_*.py + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = ad9862 \ + numpy \ + *swig* \ + *Swig* \ + *my_top_block* \ + *my_graph* \ + *app_top_block* \ + *am_rx_graph* \ + *_queue_watcher_thread* \ + *parse* \ + *MyFrame* \ + *MyApp* \ + *PyObject* \ + *wfm_rx_block* \ + *_sptr* \ + *debug* \ + *wfm_rx_sca_block* \ + *tv_rx_block* \ + *wxapt_rx_block* \ + *example_signal* + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. 
Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be ignored. +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# none of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = *.py=@top_srcdir@/doc/doxygen/other/doxypy.py + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C, C++ and Fortran comments will always remain visible. + +STRIP_CODE_COMMENTS = NO + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code.
+# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = @enable_html_docs@ + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. +# It is advised to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when +# changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. 
It can be used to +# fine-tune the look of the HTML output. If left blank doxygen will +# generate a default style sheet. Note that it is recommended to use +# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this +# tag will in the future become obsolete. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional +# user-defined cascading style sheet that is included after the standard +# style sheets created by doxygen. Using this option one can overrule +# certain style aspects. This is preferred over using HTML_STYLESHEET +# since it does not replace the standard style sheet and is therefor more +# robust against future updates. Doxygen will copy the style sheet file to +# the output directory. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the style sheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of +# entries shown in the various tree structured indices initially; the user +# can expand and collapse entries dynamically later on. Doxygen will expand +# the tree to such a level that at most the specified number of entries are +# visible (unless a fully collapsed tree already exceeds this amount). +# So setting the number of entries 1 will produce a full collapsed tree by +# default. 0 is a special value representing an infinite number of entries +# and will result in a full expanded tree by default. 
+ +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely +# identify the documentation publisher. This should be a reverse domain-name +# style string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = YES + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. 
+ +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters +# (Qt Help Project / Custom Filters). + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. For more information please see +# http://doc.trolltech.com/qthelpproject.html#filter-attributes +# (Qt Help Project / Filter Attributes). + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files need to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) +# at top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. Since the tabs have the same information as the +# navigation tree you can set this option to NO if you already set +# GENERATE_TREEVIEW to YES. + +DISABLE_INDEX = YES + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. +# Since the tree basically has the same information as the tab index you +# could consider setting DISABLE_INDEX to NO when enabling this option. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values +# (range [0,1..20]) that doxygen will group on one line in the generated HTML +# documentation.
Note that a value of 0 will completely suppress the enum +# values from appearing in the overview section. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 180 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you may also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and +# SVG. The default value is HTML-CSS, which is slower, but has the best +# compatibility. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to +# the MathJax Content Delivery Network so you can quickly see the result without +# installing MathJax. +# However, it is strongly recommended to install a local +# copy of MathJax from http://www.mathjax.org before deployment. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension +# names that should be enabled during MathJax rendering. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript +# pieces of code that will be used on startup of the MathJax code. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. 
+ +SEARCHENGINE = NO + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a web server instead of a web client using Javascript. +# There are two flavours of web server based search depending on the +# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for +# searching and an index file used by the script. When EXTERNAL_SEARCH is +# enabled the indexing and searching need to be provided by external tools. +# See the manual for details. + +SERVER_BASED_SEARCH = NO + +# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP +# script for searching. Instead the search results are written to an XML file +# which needs to be processed by an external indexer. Doxygen will invoke an +# external search engine pointed to by the SEARCHENGINE_URL option to obtain +# the search results. Doxygen ships with an example indexer (doxyindexer) and +# search engine (doxysearch.cgi) which are based on the open source search +# engine library Xapian. See the manual for configuration details. + +EXTERNAL_SEARCH = NO + +# The SEARCHENGINE_URL should point to a search engine hosted by a web server +# which will return the search results when EXTERNAL_SEARCH is enabled. +# Doxygen ships with an example search engine (doxysearch) which is based on +# the open source search engine library Xapian. See the manual for configuration +# details. + +SEARCHENGINE_URL = + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed +# search data is written to a file for indexing by an external tool. With the +# SEARCHDATA_FILE tag the name of this file can be specified. + +SEARCHDATA_FILE = searchdata.xml + +# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the +# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is +# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple +# projects and redirect the results back to the right project. + +EXTERNAL_SEARCH_ID = + +# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen +# projects other than the one defined by this configuration file, but that are +# all added to the same external search index. Each project needs to have a +# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id +# of each project to a relative location where the documentation can be found. +# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... + +EXTRA_SEARCH_MAPPINGS = + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = @enable_latex_docs@ + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory.
+ +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4 will be used. + +PAPER_TYPE = letter + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for +# the generated latex document. The footer should contain everything after +# the last chapter. If it is left blank doxygen will generate a +# standard footer. Notice: only use this tag if you know what you are doing! + +LATEX_FOOTER = + +# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images +# or other source files which should be copied to the LaTeX output directory. +# Note that the files will be copied as-is; there are no commands or markers +# available. + +LATEX_EXTRA_FILES = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = NO + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See +# http://en.wikipedia.org/wiki/BibTeX for more info. 
+ +LATEX_BIB_STYLE = plain + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load style sheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = @enable_xml_docs@ + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. 
+ +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = NO + +#--------------------------------------------------------------------------- +# configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- + +# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files +# that can be used to generate PDF. + +GENERATE_DOCBOOK = NO + +# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in +# front of it. If left blank docbook will be used as the default path. + +DOCBOOK_OUTPUT = docbook + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. 
If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that +# overrules the definition found in the source code. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. For each +# tag file the location of the external documentation should be added. The +# format of a tag file without this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths +# or URLs. Note that each tag file must have a unique name (where the name does +# NOT include the path). If a tag file is not located in the directory in which +# doxygen is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. 
+ +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed +# in the related pages index. If set to NO, only the current project's +# pages will be listed. + +EXTERNAL_PAGES = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = @HAVE_DOT@ + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will use the Helvetica font for all dot files that +# doxygen generates. When you want a differently looking font you can specify +# the font name using DOT_FONTNAME. You need to make sure dot is able to find +# the font, which can be done by putting it in a standard location or by setting +# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the Helvetica font. +# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to +# set the path where dot can find it. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. 
+ +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = NO + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside +# the class node. If there are many fields or methods and many nodes the +# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS +# threshold limits the number of items for each type to make the size more +# manageable. Set this to 0 for no limit. Note that the threshold may be +# exceeded by 50% before the limit is enforced. + +UML_LIMIT_NUM_FIELDS = 10 + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are svg, png, jpg, or gif. +# If left blank png will be used. If you choose svg you need to set +# HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible in IE 9+ (other browsers do not have this requirement). 
+ +DOT_IMAGE_FORMAT = png + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# Note that this requires a modern browser other than Internet Explorer. +# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you +# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible. Older versions of IE do not have SVG support. + +INTERACTIVE_SVG = NO + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. 
+ +DOT_CLEANUP = YES diff --git a/docs/doxygen/Doxyfile.swig_doc.in b/docs/doxygen/Doxyfile.swig_doc.in new file mode 100644 index 0000000..57736d7 --- /dev/null +++ b/docs/doxygen/Doxyfile.swig_doc.in @@ -0,0 +1,1890 @@ +# Doxyfile 1.8.4 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed +# in front of the TAG it is preceding . +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or sequence of words) that should +# identify the project. Note that if you do not use Doxywizard you need +# to put quotes around the project name if it contains spaces. + +PROJECT_NAME = @CPACK_PACKAGE_NAME@ + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@ + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, +# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, +# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. Note that you specify absolute paths here, but also +# relative paths, which will be relative from the directory where doxygen is +# started. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. 
+ +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. 
Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, +# and language is one of the parsers supported by doxygen: IDL, Java, +# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, +# C++. For instance to make doxygen treat .inc files as Fortran files (default +# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note +# that for custom extensions you also need to set FILE_PATTERNS otherwise the +# files are not read by doxygen. + +EXTENSION_MAPPING = + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. +# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by by putting a % sign in front of the word +# or globally by setting AUTOLINK_SUPPORT to NO. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = YES + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES (the +# default) will make doxygen replace the get and set methods by a property in +# the documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). 
Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields or simple typedef fields will be shown +# inline in the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO (the default), structs, classes, and unions are shown on a separate +# page (for HTML and Man pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can +# be an expensive process and often the same symbol appear multiple times in +# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too +# small doxygen will become slower. If the cache is too large, memory is wasted. +# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid +# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 +# symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. 
+# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. 
If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if section-label ... \endif +# and \cond section-label ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. 
+ +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command <command> <input-file>, where <command> is the value of +# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files +# containing the references data. This must be a list of .bib files. The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. Do not use +# file names with spaces, bibtex cannot handle them. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation.
+ +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = @INPUT_PATHS@ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = *.h + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. 
Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain images that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be ignored. +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# none of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C, C++ and Fortran comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = NO + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). 
If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. +# It is advised to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when +# changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If left blank doxygen will +# generate a default style sheet. Note that it is recommended to use +# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this +# tag will in the future become obsolete. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional +# user-defined cascading style sheet that is included after the standard +# style sheets created by doxygen. Using this option one can overrule +# certain style aspects. This is preferred over using HTML_STYLESHEET +# since it does not replace the standard style sheet and is therefor more +# robust against future updates. Doxygen will copy the style sheet file to +# the output directory. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the style sheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. 
The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of +# entries shown in the various tree structured indices initially; the user +# can expand and collapse entries dynamically later on. Doxygen will expand +# the tree to such a level that at most the specified number of entries are +# visible (unless a fully collapsed tree already exceeds this amount). +# So setting the number of entries 1 will produce a full collapsed tree by +# default. 0 is a special value representing an infinite number of entries +# and will result in a full expanded tree by default. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely +# identify the documentation publisher. This should be a reverse domain-name +# style string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. 
+ +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. 
When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) +# at top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. Since the tabs have the same information as the +# navigation tree you can set this option to NO if you already set +# GENERATE_TREEVIEW to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. +# Since the tree basically has the same information as the tab index you +# could consider to set DISABLE_INDEX to NO when enabling this option. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values +# (range [0,1..20]) that doxygen will group on one line in the generated HTML +# documentation. Note that a value of 0 will completely suppress the enum +# values from appearing in the overview section. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you may also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and +# SVG. The default value is HTML-CSS, which is slower, but has the best +# compatibility. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. 
For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to +# the MathJax Content Delivery Network so you can quickly see the result without +# installing MathJax. +# However, it is strongly recommended to install a local +# copy of MathJax from http://www.mathjax.org before deployment. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension +# names that should be enabled during MathJax rendering. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript +# pieces of code that will be used on startup of the MathJax code. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a web server instead of a web client using Javascript. +# There are two flavours of web server based search depending on the +# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for +# searching and an index file used by the script. When EXTERNAL_SEARCH is +# enabled the indexing and searching needs to be provided by external tools. +# See the manual for details. + +SERVER_BASED_SEARCH = NO + +# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP +# script for searching. Instead the search results are written to an XML file +# which needs to be processed by an external indexer. Doxygen will invoke an +# external search engine pointed to by the SEARCHENGINE_URL option to obtain +# the search results. Doxygen ships with an example indexer (doxyindexer) and +# search engine (doxysearch.cgi) which are based on the open source search +# engine library Xapian. See the manual for configuration details. + +EXTERNAL_SEARCH = NO + +# The SEARCHENGINE_URL should point to a search engine hosted by a web server +# which will returned the search results when EXTERNAL_SEARCH is enabled. +# Doxygen ships with an example search engine (doxysearch) which is based on +# the open source search engine library Xapian. See the manual for configuration +# details. + +SEARCHENGINE_URL = + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed +# search data is written to a file for indexing by an external tool. With the +# SEARCHDATA_FILE tag the name of this file can be specified. + +SEARCHDATA_FILE = searchdata.xml + +# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the +# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is +# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple +# projects and redirect the results back to the right project. + +EXTERNAL_SEARCH_ID = + +# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen +# projects other than the one defined by this configuration file, but that are +# all added to the same external search index. 
Each project needs to have a +# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id +# of to a relative location where the documentation can be found. +# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... + +EXTRA_SEARCH_MAPPINGS = + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4 will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for +# the generated latex document. The footer should contain everything after +# the last chapter. If it is left blank doxygen will generate a +# standard footer. Notice: only use this tag if you know what you are doing! + +LATEX_FOOTER = + +# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images +# or other source files which should be copied to the LaTeX output directory. +# Note that the files will be copied as-is; there are no commands or markers +# available. + +LATEX_EXTRA_FILES = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. 
This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See +# http://en.wikipedia.org/wiki/BibTeX for more info. + +LATEX_BIB_STYLE = plain + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load style sheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. 
+ +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = YES + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- + +# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files +# that can be used to generate PDF. + +GENERATE_DOCBOOK = NO + +# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in +# front of it. If left blank docbook will be used as the default path. + +DOCBOOK_OUTPUT = docbook + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. 
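GENERATE_XML = YES above is the one output format this module actually depends on: the doxyxml package added later in this patch reads the resulting xml/ tree and exposes the documented entities to Python. As a rough usage sketch only (not part of the build; the path below is illustrative and assumes doxygen has already been run against this Doxyfile):

    from doxyxml import DoxyIndex, DoxyClass, DoxyFunction

    # Illustrative location: wherever OUTPUT_DIRECTORY/XML_OUTPUT ended up for your build.
    di = DoxyIndex('build/docs/doxygen/xml/')

    # List every documented class and free function with its brief description.
    for mem in di.in_category(DoxyClass) + di.in_category(DoxyFunction):
        print('%s: %s' % (mem.name(), mem.brief_description))

The doctest examples in doxyxml/__init__.py below exercise the same calls against the example folder referenced there.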
+ +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that +# overrules the definition found in the source code. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. For each +# tag file the location of the external documentation should be added. The +# format of a tag file without this location is as follows: +# +# TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths +# or URLs. Note that each tag file must have a unique name (where the name does +# NOT include the path). If a tag file is not located in the directory in which +# doxygen is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed +# in the related pages index. If set to NO, only the current project's +# pages will be listed. + +EXTERNAL_PAGES = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will use the Helvetica font for all dot files that +# doxygen generates. When you want a differently looking font you can specify +# the font name using DOT_FONTNAME. 
You need to make sure dot is able to find +# the font, which can be done by putting it in a standard location or by setting +# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the Helvetica font. +# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to +# set the path where dot can find it. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside +# the class node. If there are many fields or methods and many nodes the +# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS +# threshold limits the number of items for each type to make the size more +# manageable. Set this to 0 for no limit. Note that the threshold may be +# exceeded by 50% before the limit is enforced. + +UML_LIMIT_NUM_FIELDS = 10 + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. 
+ +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are svg, png, jpg, or gif. +# If left blank png will be used. If you choose svg you need to set +# HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible in IE 9+ (other browsers do not have this requirement). + +DOT_IMAGE_FORMAT = png + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# Note that this requires a modern browser other than Internet Explorer. +# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you +# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible. Older versions of IE do not have SVG support. + +INTERACTIVE_SVG = NO + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). 
This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/docs/doxygen/doxyxml/__init__.py b/docs/doxygen/doxyxml/__init__.py new file mode 100644 index 0000000..6df88dc --- /dev/null +++ b/docs/doxygen/doxyxml/__init__.py @@ -0,0 +1,79 @@ +''' + Copyright (C) 2016 Bastille Networks + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' + +""" +Python interface to contents of doxygen xml documentation. + +Example use: +See the contents of the example folder for the C++ and +doxygen-generated xml used in this example. + +>>> # Parse the doxygen docs. +>>> import os +>>> this_dir = os.path.dirname(globals()['__file__']) +>>> xml_path = this_dir + "/example/xml/" +>>> di = DoxyIndex(xml_path) + +Get a list of all top-level objects. + +>>> print([mem.name() for mem in di.members()]) +[u'Aadvark', u'aadvarky_enough', u'main'] + +Get all functions. + +>>> print([mem.name() for mem in di.in_category(DoxyFunction)]) +[u'aadvarky_enough', u'main'] + +Check if an object is present. + +>>> di.has_member(u'Aadvark') +True +>>> di.has_member(u'Fish') +False + +Get an item by name and check its properties. + +>>> aad = di.get_member(u'Aadvark') +>>> print(aad.brief_description) +Models the mammal Aadvark. +>>> print(aad.detailed_description) +Sadly the model is incomplete and cannot capture all aspects of an aadvark yet. + +This line is uninformative and is only to test line breaks in the comments. +>>> [mem.name() for mem in aad.members()] +[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness'] +>>> aad.get_member(u'print').brief_description +u'Outputs the vital aadvark statistics.' 
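Walk the parameters of a documented function (a sketch only; it assumes the
example function above has at least one documented parameter, and uses only the
DoxyFunction.params / DoxyParam.declname accessors defined in doxyindex.py):

>>> func = di.get_member(u'aadvarky_enough', cat=DoxyFunction)
>>> param_names = [prm.declname for prm in func.params]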
+ +""" + +from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther + +def _test(): + import os + this_dir = os.path.dirname(globals()['__file__']) + xml_path = this_dir + "/example/xml/" + di = DoxyIndex(xml_path) + # Get the Aadvark class + aad = di.get_member('Aadvark') + aad.brief_description + import doctest + return doctest.testmod() + +if __name__ == "__main__": + _test() + diff --git a/docs/doxygen/doxyxml/base.py b/docs/doxygen/doxyxml/base.py new file mode 100644 index 0000000..383dc26 --- /dev/null +++ b/docs/doxygen/doxyxml/base.py @@ -0,0 +1,216 @@ +''' + Copyright (C) 2016 Bastille Networks + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' + +""" +A base class is created. + +Classes based upon this are used to make more user-friendly interfaces +to the doxygen xml docs than the generated classes provide. +""" + +import os +import pdb + +from xml.parsers.expat import ExpatError + +from generated import compound + + +class Base(object): + + class Duplicate(StandardError): + pass + + class NoSuchMember(StandardError): + pass + + class ParsingError(StandardError): + pass + + def __init__(self, parse_data, top=None): + self._parsed = False + self._error = False + self._parse_data = parse_data + self._members = [] + self._dict_members = {} + self._in_category = {} + self._data = {} + if top is not None: + self._xml_path = top._xml_path + # Set up holder of references + else: + top = self + self._refs = {} + self._xml_path = parse_data + self.top = top + + @classmethod + def from_refid(cls, refid, top=None): + """ Instantiate class from a refid rather than parsing object. """ + # First check to see if its already been instantiated. + if top is not None and refid in top._refs: + return top._refs[refid] + # Otherwise create a new instance and set refid. + inst = cls(None, top=top) + inst.refid = refid + inst.add_ref(inst) + return inst + + @classmethod + def from_parse_data(cls, parse_data, top=None): + refid = getattr(parse_data, 'refid', None) + if refid is not None and top is not None and refid in top._refs: + return top._refs[refid] + inst = cls(parse_data, top=top) + if refid is not None: + inst.refid = refid + inst.add_ref(inst) + return inst + + def add_ref(self, obj): + if hasattr(obj, 'refid'): + self.top._refs[obj.refid] = obj + + mem_classes = [] + + def get_cls(self, mem): + for cls in self.mem_classes: + if cls.can_parse(mem): + return cls + raise StandardError(("Did not find a class for object '%s'." 
\ + % (mem.get_name()))) + + def convert_mem(self, mem): + try: + cls = self.get_cls(mem) + converted = cls.from_parse_data(mem, self.top) + if converted is None: + raise StandardError('No class matched this object.') + self.add_ref(converted) + return converted + except StandardError, e: + print e + + @classmethod + def includes(cls, inst): + return isinstance(inst, cls) + + @classmethod + def can_parse(cls, obj): + return False + + def _parse(self): + self._parsed = True + + def _get_dict_members(self, cat=None): + """ + For given category a dictionary is returned mapping member names to + members of that category. For names that are duplicated the name is + mapped to None. + """ + self.confirm_no_error() + if cat not in self._dict_members: + new_dict = {} + for mem in self.in_category(cat): + if mem.name() not in new_dict: + new_dict[mem.name()] = mem + else: + new_dict[mem.name()] = self.Duplicate + self._dict_members[cat] = new_dict + return self._dict_members[cat] + + def in_category(self, cat): + self.confirm_no_error() + if cat is None: + return self._members + if cat not in self._in_category: + self._in_category[cat] = [mem for mem in self._members + if cat.includes(mem)] + return self._in_category[cat] + + def get_member(self, name, cat=None): + self.confirm_no_error() + # Check if it's in a namespace or class. + bits = name.split('::') + first = bits[0] + rest = '::'.join(bits[1:]) + member = self._get_dict_members(cat).get(first, self.NoSuchMember) + # Raise any errors that are returned. + if member in set([self.NoSuchMember, self.Duplicate]): + raise member() + if rest: + return member.get_member(rest, cat=cat) + return member + + def has_member(self, name, cat=None): + try: + mem = self.get_member(name, cat=cat) + return True + except self.NoSuchMember: + return False + + def data(self): + self.confirm_no_error() + return self._data + + def members(self): + self.confirm_no_error() + return self._members + + def process_memberdefs(self): + mdtss = [] + for sec in self._retrieved_data.compounddef.sectiondef: + mdtss += sec.memberdef + # At the moment we lose all information associated with sections. + # Sometimes a memberdef is in several sectiondef. + # We make sure we don't get duplicates here. + uniques = set([]) + for mem in mdtss: + converted = self.convert_mem(mem) + pair = (mem.name, mem.__class__) + if pair not in uniques: + uniques.add(pair) + self._members.append(converted) + + def retrieve_data(self): + filename = os.path.join(self._xml_path, self.refid + '.xml') + try: + self._retrieved_data = compound.parse(filename) + except ExpatError: + print('Error in xml in file %s' % filename) + self._error = True + self._retrieved_data = None + + def check_parsed(self): + if not self._parsed: + self._parse() + + def confirm_no_error(self): + self.check_parsed() + if self._error: + raise self.ParsingError() + + def error(self): + self.check_parsed() + return self._error + + def name(self): + # first see if we can do it without processing. 
+ if self._parse_data is not None: + return self._parse_data.name + self.check_parsed() + return self._retrieved_data.compounddef.name diff --git a/docs/doxygen/doxyxml/doxyindex.py b/docs/doxygen/doxyxml/doxyindex.py new file mode 100644 index 0000000..8fed1b4 --- /dev/null +++ b/docs/doxygen/doxyxml/doxyindex.py @@ -0,0 +1,234 @@ +''' + Copyright (C) 2016 Bastille Networks + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' + +""" +Classes providing more user-friendly interfaces to the doxygen xml +docs than the generated classes provide. +""" + +import os + +from generated import index +from base import Base +from text import description + +class DoxyIndex(Base): + """ + Parses a doxygen xml directory. + """ + + __module__ = "gnuradio.utils.doxyxml" + + def _parse(self): + if self._parsed: + return + super(DoxyIndex, self)._parse() + self._root = index.parse(os.path.join(self._xml_path, 'index.xml')) + for mem in self._root.compound: + converted = self.convert_mem(mem) + # For files we want the contents to be accessible directly + # from the parent rather than having to go through the file + # object. + if self.get_cls(mem) == DoxyFile: + if mem.name.endswith('.h'): + self._members += converted.members() + self._members.append(converted) + else: + self._members.append(converted) + + +def generate_swig_doc_i(self): + """ + %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state " + Wraps the C++: gr_align_on_samplenumbers_ss::align_state"; + """ + pass + + +class DoxyCompMem(Base): + + + kind = None + + def __init__(self, *args, **kwargs): + super(DoxyCompMem, self).__init__(*args, **kwargs) + + @classmethod + def can_parse(cls, obj): + return obj.kind == cls.kind + + def set_descriptions(self, parse_data): + bd = description(getattr(parse_data, 'briefdescription', None)) + dd = description(getattr(parse_data, 'detaileddescription', None)) + self._data['brief_description'] = bd + self._data['detailed_description'] = dd + +class DoxyCompound(DoxyCompMem): + pass + +class DoxyMember(DoxyCompMem): + pass + + +class DoxyFunction(DoxyMember): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'function' + + def _parse(self): + if self._parsed: + return + super(DoxyFunction, self)._parse() + self.set_descriptions(self._parse_data) + self._data['params'] = [] + prms = self._parse_data.param + for prm in prms: + self._data['params'].append(DoxyParam(prm)) + + brief_description = property(lambda self: self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + params = property(lambda self: self.data()['params']) + +Base.mem_classes.append(DoxyFunction) + + +class DoxyParam(DoxyMember): + + __module__ = "gnuradio.utils.doxyxml" + + def _parse(self): + if self._parsed: + return + super(DoxyParam, self)._parse() + self.set_descriptions(self._parse_data) + self._data['declname'] = self._parse_data.declname + + brief_description = property(lambda self: 
self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + declname = property(lambda self: self.data()['declname']) + +class DoxyClass(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'class' + + def _parse(self): + if self._parsed: + return + super(DoxyClass, self)._parse() + self.retrieve_data() + if self._error: + return + self.set_descriptions(self._retrieved_data.compounddef) + # Sectiondef.kind tells about whether private or public. + # We just ignore this for now. + self.process_memberdefs() + + brief_description = property(lambda self: self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + +Base.mem_classes.append(DoxyClass) + + +class DoxyFile(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'file' + + def _parse(self): + if self._parsed: + return + super(DoxyFile, self)._parse() + self.retrieve_data() + self.set_descriptions(self._retrieved_data.compounddef) + if self._error: + return + self.process_memberdefs() + + brief_description = property(lambda self: self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + +Base.mem_classes.append(DoxyFile) + + +class DoxyNamespace(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'namespace' + +Base.mem_classes.append(DoxyNamespace) + + +class DoxyGroup(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'group' + + def _parse(self): + if self._parsed: + return + super(DoxyGroup, self)._parse() + self.retrieve_data() + if self._error: + return + cdef = self._retrieved_data.compounddef + self._data['title'] = description(cdef.title) + # Process inner groups + grps = cdef.innergroup + for grp in grps: + converted = DoxyGroup.from_refid(grp.refid, top=self.top) + self._members.append(converted) + # Process inner classes + klasses = cdef.innerclass + for kls in klasses: + converted = DoxyClass.from_refid(kls.refid, top=self.top) + self._members.append(converted) + # Process normal members + self.process_memberdefs() + + title = property(lambda self: self.data()['title']) + + +Base.mem_classes.append(DoxyGroup) + + +class DoxyFriend(DoxyMember): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'friend' + +Base.mem_classes.append(DoxyFriend) + + +class DoxyOther(Base): + + __module__ = "gnuradio.utils.doxyxml" + + kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page']) + + @classmethod + def can_parse(cls, obj): + return obj.kind in cls.kinds + +Base.mem_classes.append(DoxyOther) + diff --git a/docs/doxygen/doxyxml/generated/__init__.py b/docs/doxygen/doxyxml/generated/__init__.py new file mode 100644 index 0000000..3982397 --- /dev/null +++ b/docs/doxygen/doxyxml/generated/__init__.py @@ -0,0 +1,7 @@ +""" +Contains generated files produced by generateDS.py. + +These do the real work of parsing the doxygen xml files but the +resultant classes are not very friendly to navigate so the rest of the +doxyxml module processes them further. +""" diff --git a/docs/doxygen/doxyxml/generated/compound.py b/docs/doxygen/doxyxml/generated/compound.py new file mode 100644 index 0000000..1522ac2 --- /dev/null +++ b/docs/doxygen/doxyxml/generated/compound.py @@ -0,0 +1,503 @@ +#!/usr/bin/env python + +""" +Generated Mon Feb 9 19:08:05 2009 by generateDS.py. 
+""" + +from string import lower as str_lower +from xml.dom import minidom +from xml.dom import Node + +import sys + +import compoundsuper as supermod +from compoundsuper import MixedContainer + + +class DoxygenTypeSub(supermod.DoxygenType): + def __init__(self, version=None, compounddef=None): + supermod.DoxygenType.__init__(self, version, compounddef) + + def find(self, details): + + return self.compounddef.find(details) + +supermod.DoxygenType.subclass = DoxygenTypeSub +# end class DoxygenTypeSub + + +class compounddefTypeSub(supermod.compounddefType): + def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): + supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers) + + def find(self, details): + + if self.id == details.refid: + return self + + for sectiondef in self.sectiondef: + result = sectiondef.find(details) + if result: + return result + + +supermod.compounddefType.subclass = compounddefTypeSub +# end class compounddefTypeSub + + +class listofallmembersTypeSub(supermod.listofallmembersType): + def __init__(self, member=None): + supermod.listofallmembersType.__init__(self, member) +supermod.listofallmembersType.subclass = listofallmembersTypeSub +# end class listofallmembersTypeSub + + +class memberRefTypeSub(supermod.memberRefType): + def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''): + supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name) +supermod.memberRefType.subclass = memberRefTypeSub +# end class memberRefTypeSub + + +class compoundRefTypeSub(supermod.compoundRefType): + def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.compoundRefType.__init__(self, mixedclass_, content_) +supermod.compoundRefType.subclass = compoundRefTypeSub +# end class compoundRefTypeSub + + +class reimplementTypeSub(supermod.reimplementType): + def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.reimplementType.__init__(self, mixedclass_, content_) +supermod.reimplementType.subclass = reimplementTypeSub +# end class reimplementTypeSub + + +class incTypeSub(supermod.incType): + def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.incType.__init__(self, mixedclass_, content_) +supermod.incType.subclass = incTypeSub +# end class incTypeSub + + +class refTypeSub(supermod.refType): + def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.refType.__init__(self, mixedclass_, content_) +supermod.refType.subclass = refTypeSub +# end class refTypeSub + + + +class refTextTypeSub(supermod.refTextType): + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, 
content_=None): + supermod.refTextType.__init__(self, mixedclass_, content_) + +supermod.refTextType.subclass = refTextTypeSub +# end class refTextTypeSub + +class sectiondefTypeSub(supermod.sectiondefType): + + + def __init__(self, kind=None, header='', description=None, memberdef=None): + supermod.sectiondefType.__init__(self, kind, header, description, memberdef) + + def find(self, details): + + for memberdef in self.memberdef: + if memberdef.id == details.refid: + return memberdef + + return None + + +supermod.sectiondefType.subclass = sectiondefTypeSub +# end class sectiondefTypeSub + + +class memberdefTypeSub(supermod.memberdefType): + def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): + supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby) +supermod.memberdefType.subclass = memberdefTypeSub +# end class memberdefTypeSub + + +class descriptionTypeSub(supermod.descriptionType): + def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None): + supermod.descriptionType.__init__(self, mixedclass_, content_) +supermod.descriptionType.subclass = descriptionTypeSub +# end class descriptionTypeSub + + +class enumvalueTypeSub(supermod.enumvalueType): + def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): + supermod.enumvalueType.__init__(self, mixedclass_, content_) +supermod.enumvalueType.subclass = enumvalueTypeSub +# end class enumvalueTypeSub + + +class templateparamlistTypeSub(supermod.templateparamlistType): + def __init__(self, param=None): + supermod.templateparamlistType.__init__(self, param) +supermod.templateparamlistType.subclass = templateparamlistTypeSub +# end class templateparamlistTypeSub + + +class paramTypeSub(supermod.paramType): + def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None): + supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription) +supermod.paramType.subclass = paramTypeSub +# end class paramTypeSub + + +class linkedTextTypeSub(supermod.linkedTextType): + def __init__(self, ref=None, mixedclass_=None, content_=None): + supermod.linkedTextType.__init__(self, mixedclass_, content_) +supermod.linkedTextType.subclass = linkedTextTypeSub +# end class linkedTextTypeSub + + +class graphTypeSub(supermod.graphType): + def __init__(self, node=None): + supermod.graphType.__init__(self, node) +supermod.graphType.subclass = graphTypeSub +# end class graphTypeSub + + +class nodeTypeSub(supermod.nodeType): + 
def __init__(self, id=None, label='', link=None, childnode=None): + supermod.nodeType.__init__(self, id, label, link, childnode) +supermod.nodeType.subclass = nodeTypeSub +# end class nodeTypeSub + + +class childnodeTypeSub(supermod.childnodeType): + def __init__(self, relation=None, refid=None, edgelabel=None): + supermod.childnodeType.__init__(self, relation, refid, edgelabel) +supermod.childnodeType.subclass = childnodeTypeSub +# end class childnodeTypeSub + + +class linkTypeSub(supermod.linkType): + def __init__(self, refid=None, external=None, valueOf_=''): + supermod.linkType.__init__(self, refid, external) +supermod.linkType.subclass = linkTypeSub +# end class linkTypeSub + + +class listingTypeSub(supermod.listingType): + def __init__(self, codeline=None): + supermod.listingType.__init__(self, codeline) +supermod.listingType.subclass = listingTypeSub +# end class listingTypeSub + + +class codelineTypeSub(supermod.codelineType): + def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): + supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight) +supermod.codelineType.subclass = codelineTypeSub +# end class codelineTypeSub + + +class highlightTypeSub(supermod.highlightType): + def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None): + supermod.highlightType.__init__(self, mixedclass_, content_) +supermod.highlightType.subclass = highlightTypeSub +# end class highlightTypeSub + + +class referenceTypeSub(supermod.referenceType): + def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): + supermod.referenceType.__init__(self, mixedclass_, content_) +supermod.referenceType.subclass = referenceTypeSub +# end class referenceTypeSub + + +class locationTypeSub(supermod.locationType): + def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): + supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file) +supermod.locationType.subclass = locationTypeSub +# end class locationTypeSub + + +class docSect1TypeSub(supermod.docSect1Type): + def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect1Type.__init__(self, mixedclass_, content_) +supermod.docSect1Type.subclass = docSect1TypeSub +# end class docSect1TypeSub + + +class docSect2TypeSub(supermod.docSect2Type): + def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect2Type.__init__(self, mixedclass_, content_) +supermod.docSect2Type.subclass = docSect2TypeSub +# end class docSect2TypeSub + + +class docSect3TypeSub(supermod.docSect3Type): + def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect3Type.__init__(self, mixedclass_, content_) +supermod.docSect3Type.subclass = docSect3TypeSub +# end class docSect3TypeSub + + +class docSect4TypeSub(supermod.docSect4Type): + def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect4Type.__init__(self, mixedclass_, content_) +supermod.docSect4Type.subclass = docSect4TypeSub +# end class docSect4TypeSub + + +class docInternalTypeSub(supermod.docInternalType): + def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): + supermod.docInternalType.__init__(self, mixedclass_, content_) 
+supermod.docInternalType.subclass = docInternalTypeSub +# end class docInternalTypeSub + + +class docInternalS1TypeSub(supermod.docInternalS1Type): + def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): + supermod.docInternalS1Type.__init__(self, mixedclass_, content_) +supermod.docInternalS1Type.subclass = docInternalS1TypeSub +# end class docInternalS1TypeSub + + +class docInternalS2TypeSub(supermod.docInternalS2Type): + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + supermod.docInternalS2Type.__init__(self, mixedclass_, content_) +supermod.docInternalS2Type.subclass = docInternalS2TypeSub +# end class docInternalS2TypeSub + + +class docInternalS3TypeSub(supermod.docInternalS3Type): + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + supermod.docInternalS3Type.__init__(self, mixedclass_, content_) +supermod.docInternalS3Type.subclass = docInternalS3TypeSub +# end class docInternalS3TypeSub + + +class docInternalS4TypeSub(supermod.docInternalS4Type): + def __init__(self, para=None, mixedclass_=None, content_=None): + supermod.docInternalS4Type.__init__(self, mixedclass_, content_) +supermod.docInternalS4Type.subclass = docInternalS4TypeSub +# end class docInternalS4TypeSub + + +class docURLLinkSub(supermod.docURLLink): + def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docURLLink.__init__(self, mixedclass_, content_) +supermod.docURLLink.subclass = docURLLinkSub +# end class docURLLinkSub + + +class docAnchorTypeSub(supermod.docAnchorType): + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docAnchorType.__init__(self, mixedclass_, content_) +supermod.docAnchorType.subclass = docAnchorTypeSub +# end class docAnchorTypeSub + + +class docFormulaTypeSub(supermod.docFormulaType): + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docFormulaType.__init__(self, mixedclass_, content_) +supermod.docFormulaType.subclass = docFormulaTypeSub +# end class docFormulaTypeSub + + +class docIndexEntryTypeSub(supermod.docIndexEntryType): + def __init__(self, primaryie='', secondaryie=''): + supermod.docIndexEntryType.__init__(self, primaryie, secondaryie) +supermod.docIndexEntryType.subclass = docIndexEntryTypeSub +# end class docIndexEntryTypeSub + + +class docListTypeSub(supermod.docListType): + def __init__(self, listitem=None): + supermod.docListType.__init__(self, listitem) +supermod.docListType.subclass = docListTypeSub +# end class docListTypeSub + + +class docListItemTypeSub(supermod.docListItemType): + def __init__(self, para=None): + supermod.docListItemType.__init__(self, para) +supermod.docListItemType.subclass = docListItemTypeSub +# end class docListItemTypeSub + + +class docSimpleSectTypeSub(supermod.docSimpleSectType): + def __init__(self, kind=None, title=None, para=None): + supermod.docSimpleSectType.__init__(self, kind, title, para) +supermod.docSimpleSectType.subclass = docSimpleSectTypeSub +# end class docSimpleSectTypeSub + + +class docVarListEntryTypeSub(supermod.docVarListEntryType): + def __init__(self, term=None): + supermod.docVarListEntryType.__init__(self, term) +supermod.docVarListEntryType.subclass = docVarListEntryTypeSub +# end class docVarListEntryTypeSub + + +class docRefTextTypeSub(supermod.docRefTextType): + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docRefTextType.__init__(self, mixedclass_, content_) 
+supermod.docRefTextType.subclass = docRefTextTypeSub +# end class docRefTextTypeSub + + +class docTableTypeSub(supermod.docTableType): + def __init__(self, rows=None, cols=None, row=None, caption=None): + supermod.docTableType.__init__(self, rows, cols, row, caption) +supermod.docTableType.subclass = docTableTypeSub +# end class docTableTypeSub + + +class docRowTypeSub(supermod.docRowType): + def __init__(self, entry=None): + supermod.docRowType.__init__(self, entry) +supermod.docRowType.subclass = docRowTypeSub +# end class docRowTypeSub + + +class docEntryTypeSub(supermod.docEntryType): + def __init__(self, thead=None, para=None): + supermod.docEntryType.__init__(self, thead, para) +supermod.docEntryType.subclass = docEntryTypeSub +# end class docEntryTypeSub + + +class docHeadingTypeSub(supermod.docHeadingType): + def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docHeadingType.__init__(self, mixedclass_, content_) +supermod.docHeadingType.subclass = docHeadingTypeSub +# end class docHeadingTypeSub + + +class docImageTypeSub(supermod.docImageType): + def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docImageType.__init__(self, mixedclass_, content_) +supermod.docImageType.subclass = docImageTypeSub +# end class docImageTypeSub + + +class docDotFileTypeSub(supermod.docDotFileType): + def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docDotFileType.__init__(self, mixedclass_, content_) +supermod.docDotFileType.subclass = docDotFileTypeSub +# end class docDotFileTypeSub + + +class docTocItemTypeSub(supermod.docTocItemType): + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docTocItemType.__init__(self, mixedclass_, content_) +supermod.docTocItemType.subclass = docTocItemTypeSub +# end class docTocItemTypeSub + + +class docTocListTypeSub(supermod.docTocListType): + def __init__(self, tocitem=None): + supermod.docTocListType.__init__(self, tocitem) +supermod.docTocListType.subclass = docTocListTypeSub +# end class docTocListTypeSub + + +class docLanguageTypeSub(supermod.docLanguageType): + def __init__(self, langid=None, para=None): + supermod.docLanguageType.__init__(self, langid, para) +supermod.docLanguageType.subclass = docLanguageTypeSub +# end class docLanguageTypeSub + + +class docParamListTypeSub(supermod.docParamListType): + def __init__(self, kind=None, parameteritem=None): + supermod.docParamListType.__init__(self, kind, parameteritem) +supermod.docParamListType.subclass = docParamListTypeSub +# end class docParamListTypeSub + + +class docParamListItemSub(supermod.docParamListItem): + def __init__(self, parameternamelist=None, parameterdescription=None): + supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription) +supermod.docParamListItem.subclass = docParamListItemSub +# end class docParamListItemSub + + +class docParamNameListSub(supermod.docParamNameList): + def __init__(self, parametername=None): + supermod.docParamNameList.__init__(self, parametername) +supermod.docParamNameList.subclass = docParamNameListSub +# end class docParamNameListSub + + +class docParamNameSub(supermod.docParamName): + def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): + supermod.docParamName.__init__(self, mixedclass_, content_) +supermod.docParamName.subclass = docParamNameSub +# end class docParamNameSub + + +class 
docXRefSectTypeSub(supermod.docXRefSectType): + def __init__(self, id=None, xreftitle=None, xrefdescription=None): + supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription) +supermod.docXRefSectType.subclass = docXRefSectTypeSub +# end class docXRefSectTypeSub + + +class docCopyTypeSub(supermod.docCopyType): + def __init__(self, link=None, para=None, sect1=None, internal=None): + supermod.docCopyType.__init__(self, link, para, sect1, internal) +supermod.docCopyType.subclass = docCopyTypeSub +# end class docCopyTypeSub + + +class docCharTypeSub(supermod.docCharType): + def __init__(self, char=None, valueOf_=''): + supermod.docCharType.__init__(self, char) +supermod.docCharType.subclass = docCharTypeSub +# end class docCharTypeSub + +class docParaTypeSub(supermod.docParaType): + def __init__(self, char=None, valueOf_=''): + supermod.docParaType.__init__(self, char) + + self.parameterlist = [] + self.simplesects = [] + self.content = [] + + def buildChildren(self, child_, nodeName_): + supermod.docParaType.buildChildren(self, child_, nodeName_) + + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == "ref": + obj_ = supermod.docRefTextType.factory() + obj_.build(child_) + self.content.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameterlist': + obj_ = supermod.docParamListType.factory() + obj_.build(child_) + self.parameterlist.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'simplesect': + obj_ = supermod.docSimpleSectType.factory() + obj_.build(child_) + self.simplesects.append(obj_) + + +supermod.docParaType.subclass = docParaTypeSub +# end class docParaTypeSub + + + +def parse(inFilename): + doc = minidom.parse(inFilename) + rootNode = doc.documentElement + rootObj = supermod.DoxygenType.factory() + rootObj.build(rootNode) + return rootObj + + diff --git a/docs/doxygen/doxyxml/generated/compoundsuper.py b/docs/doxygen/doxyxml/generated/compoundsuper.py new file mode 100644 index 0000000..6255dda --- /dev/null +++ b/docs/doxygen/doxyxml/generated/compoundsuper.py @@ -0,0 +1,8342 @@ +#!/usr/bin/env python + +# +# Generated Thu Jun 11 18:44:25 2009 by generateDS.py. +# + +import sys +import getopt +from string import lower as str_lower +from xml.dom import minidom +from xml.dom import Node + +# +# User methods +# +# Calls to the methods in these classes are generated by generateDS.py. +# You can replace these methods by re-implementing the following class +# in a module named generatedssuper.py. + +try: + from generatedssuper import GeneratedsSuper +except ImportError, exp: + + class GeneratedsSuper: + def format_string(self, input_data, input_name=''): + return input_data + def format_integer(self, input_data, input_name=''): + return '%d' % input_data + def format_float(self, input_data, input_name=''): + return '%f' % input_data + def format_double(self, input_data, input_name=''): + return '%e' % input_data + def format_boolean(self, input_data, input_name=''): + return '%s' % input_data + + +# +# If you have installed IPython you can uncomment and use the following. +# IPython is available from http://ipython.scipy.org/. 
+#
+
+## from IPython.Shell import IPShellEmbed
+## args = ''
+## ipshell = IPShellEmbed(args,
+##     banner = 'Dropping into IPython',
+##     exit_msg = 'Leaving Interpreter, back to program.')
+
+# Then use the following line where and when you want to drop into the
+# IPython shell:
+# ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit')
+
+#
+# Globals
+#
+
+ExternalEncoding = 'ascii'
+
+#
+# Support/utility functions.
+#
+
+def showIndent(outfile, level):
+    for idx in range(level):
+        outfile.write('    ')
+
+def quote_xml(inStr):
+    s1 = (isinstance(inStr, basestring) and inStr or
+          '%s' % inStr)
+    s1 = s1.replace('&', '&amp;')
+    s1 = s1.replace('<', '&lt;')
+    s1 = s1.replace('>', '&gt;')
+    return s1
+
+def quote_attrib(inStr):
+    s1 = (isinstance(inStr, basestring) and inStr or
+          '%s' % inStr)
+    s1 = s1.replace('&', '&amp;')
+    s1 = s1.replace('<', '&lt;')
+    s1 = s1.replace('>', '&gt;')
+    if '"' in s1:
+        if "'" in s1:
+            s1 = '"%s"' % s1.replace('"', "&quot;")
+        else:
+            s1 = "'%s'" % s1
+    else:
+        s1 = '"%s"' % s1
+    return s1
+
+def quote_python(inStr):
+    s1 = inStr
+    if s1.find("'") == -1:
+        if s1.find('\n') == -1:
+            return "'%s'" % s1
+        else:
+            return "'''%s'''" % s1
+    else:
+        if s1.find('"') != -1:
+            s1 = s1.replace('"', '\\"')
+        if s1.find('\n') == -1:
+            return '"%s"' % s1
+        else:
+            return '"""%s"""' % s1
+
+
+class MixedContainer:
+    # Constants for category:
+    CategoryNone = 0
+    CategoryText = 1
+    CategorySimple = 2
+    CategoryComplex = 3
+    # Constants for content_type:
+    TypeNone = 0
+    TypeText = 1
+    TypeString = 2
+    TypeInteger = 3
+    TypeFloat = 4
+    TypeDecimal = 5
+    TypeDouble = 6
+    TypeBoolean = 7
+    def __init__(self, category, content_type, name, value):
+        self.category = category
+        self.content_type = content_type
+        self.name = name
+        self.value = value
+    def getCategory(self):
+        return self.category
+    def getContenttype(self, content_type):
+        return self.content_type
+    def getValue(self):
+        return self.value
+    def getName(self):
+        return self.name
+    def export(self, outfile, level, name, namespace):
+        if self.category == MixedContainer.CategoryText:
+            outfile.write(self.value)
+        elif self.category == MixedContainer.CategorySimple:
+            self.exportSimple(outfile, level, name)
+        else:    # category == MixedContainer.CategoryComplex
+            self.value.export(outfile, level, namespace,name)
+    def exportSimple(self, outfile, level, name):
+        if self.content_type == MixedContainer.TypeString:
+            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
+        elif self.content_type == MixedContainer.TypeInteger or \
+            self.content_type == MixedContainer.TypeBoolean:
+            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
+        elif self.content_type == MixedContainer.TypeFloat or \
+            self.content_type == MixedContainer.TypeDecimal:
+            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
+        elif self.content_type == MixedContainer.TypeDouble:
+            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+    def exportLiteral(self, outfile, level, name):
+        if self.category == MixedContainer.CategoryText:
+            showIndent(outfile, level)
+            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+                (self.category, self.content_type, self.name, self.value))
+        elif self.category == MixedContainer.CategorySimple:
+            showIndent(outfile, level)
+            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+                (self.category, self.content_type, self.name, self.value))
+        else:    # category == MixedContainer.CategoryComplex
+            showIndent(outfile, level)
+            outfile.write('MixedContainer(%d, %d, "%s",\n' % \
+                (self.category, self.content_type,
self.name,)) + self.value.exportLiteral(outfile, level + 1) + showIndent(outfile, level) + outfile.write(')\n') + + +class _MemberSpec(object): + def __init__(self, name='', data_type='', container=0): + self.name = name + self.data_type = data_type + self.container = container + def set_name(self, name): self.name = name + def get_name(self): return self.name + def set_data_type(self, data_type): self.data_type = data_type + def get_data_type(self): return self.data_type + def set_container(self, container): self.container = container + def get_container(self): return self.container + + +# +# Data representation classes. +# + +class DoxygenType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, version=None, compounddef=None): + self.version = version + self.compounddef = compounddef + def factory(*args_, **kwargs_): + if DoxygenType.subclass: + return DoxygenType.subclass(*args_, **kwargs_) + else: + return DoxygenType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_compounddef(self): return self.compounddef + def set_compounddef(self, compounddef): self.compounddef = compounddef + def get_version(self): return self.version + def set_version(self, version): self.version = version + def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): + outfile.write(' version=%s' % (quote_attrib(self.version), )) + def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): + if self.compounddef: + self.compounddef.export(outfile, level, namespace_, name_='compounddef') + def hasContent_(self): + if ( + self.compounddef is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='DoxygenType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.version is not None: + showIndent(outfile, level) + outfile.write('version = "%s",\n' % (self.version,)) + def exportLiteralChildren(self, outfile, level, name_): + if self.compounddef: + showIndent(outfile, level) + outfile.write('compounddef=model_.compounddefType(\n') + self.compounddef.exportLiteral(outfile, level, name_='compounddef') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('version'): + self.version = attrs.get('version').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'compounddef': + obj_ = compounddefType.factory() + obj_.build(child_) + self.set_compounddef(obj_) +# end class DoxygenType + + +class compounddefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, 
derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): + self.kind = kind + self.prot = prot + self.id = id + self.compoundname = compoundname + self.title = title + if basecompoundref is None: + self.basecompoundref = [] + else: + self.basecompoundref = basecompoundref + if derivedcompoundref is None: + self.derivedcompoundref = [] + else: + self.derivedcompoundref = derivedcompoundref + if includes is None: + self.includes = [] + else: + self.includes = includes + if includedby is None: + self.includedby = [] + else: + self.includedby = includedby + self.incdepgraph = incdepgraph + self.invincdepgraph = invincdepgraph + if innerdir is None: + self.innerdir = [] + else: + self.innerdir = innerdir + if innerfile is None: + self.innerfile = [] + else: + self.innerfile = innerfile + if innerclass is None: + self.innerclass = [] + else: + self.innerclass = innerclass + if innernamespace is None: + self.innernamespace = [] + else: + self.innernamespace = innernamespace + if innerpage is None: + self.innerpage = [] + else: + self.innerpage = innerpage + if innergroup is None: + self.innergroup = [] + else: + self.innergroup = innergroup + self.templateparamlist = templateparamlist + if sectiondef is None: + self.sectiondef = [] + else: + self.sectiondef = sectiondef + self.briefdescription = briefdescription + self.detaileddescription = detaileddescription + self.inheritancegraph = inheritancegraph + self.collaborationgraph = collaborationgraph + self.programlisting = programlisting + self.location = location + self.listofallmembers = listofallmembers + def factory(*args_, **kwargs_): + if compounddefType.subclass: + return compounddefType.subclass(*args_, **kwargs_) + else: + return compounddefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_compoundname(self): return self.compoundname + def set_compoundname(self, compoundname): self.compoundname = compoundname + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_basecompoundref(self): return self.basecompoundref + def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref + def add_basecompoundref(self, value): self.basecompoundref.append(value) + def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value + def get_derivedcompoundref(self): return self.derivedcompoundref + def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref + def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) + def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value + def get_includes(self): return self.includes + def set_includes(self, includes): self.includes = includes + def add_includes(self, value): self.includes.append(value) + def insert_includes(self, index, value): self.includes[index] = value + def get_includedby(self): return self.includedby + def set_includedby(self, includedby): self.includedby = includedby + def add_includedby(self, value): self.includedby.append(value) + def insert_includedby(self, index, value): self.includedby[index] = value + def get_incdepgraph(self): return self.incdepgraph + def 
set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph + def get_invincdepgraph(self): return self.invincdepgraph + def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph + def get_innerdir(self): return self.innerdir + def set_innerdir(self, innerdir): self.innerdir = innerdir + def add_innerdir(self, value): self.innerdir.append(value) + def insert_innerdir(self, index, value): self.innerdir[index] = value + def get_innerfile(self): return self.innerfile + def set_innerfile(self, innerfile): self.innerfile = innerfile + def add_innerfile(self, value): self.innerfile.append(value) + def insert_innerfile(self, index, value): self.innerfile[index] = value + def get_innerclass(self): return self.innerclass + def set_innerclass(self, innerclass): self.innerclass = innerclass + def add_innerclass(self, value): self.innerclass.append(value) + def insert_innerclass(self, index, value): self.innerclass[index] = value + def get_innernamespace(self): return self.innernamespace + def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace + def add_innernamespace(self, value): self.innernamespace.append(value) + def insert_innernamespace(self, index, value): self.innernamespace[index] = value + def get_innerpage(self): return self.innerpage + def set_innerpage(self, innerpage): self.innerpage = innerpage + def add_innerpage(self, value): self.innerpage.append(value) + def insert_innerpage(self, index, value): self.innerpage[index] = value + def get_innergroup(self): return self.innergroup + def set_innergroup(self, innergroup): self.innergroup = innergroup + def add_innergroup(self, value): self.innergroup.append(value) + def insert_innergroup(self, index, value): self.innergroup[index] = value + def get_templateparamlist(self): return self.templateparamlist + def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist + def get_sectiondef(self): return self.sectiondef + def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef + def add_sectiondef(self, value): self.sectiondef.append(value) + def insert_sectiondef(self, index, value): self.sectiondef[index] = value + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription + def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def get_inheritancegraph(self): return self.inheritancegraph + def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph + def get_collaborationgraph(self): return self.collaborationgraph + def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph + def get_programlisting(self): return self.programlisting + def set_programlisting(self, programlisting): self.programlisting = programlisting + def get_location(self): return self.location + def set_location(self, location): self.location = location + def get_listofallmembers(self): return self.listofallmembers + def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', 
name_='compounddefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='compounddefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'): + if self.compoundname is not None: + showIndent(outfile, level) + outfile.write('<%scompoundname>%s\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) + if self.title is not None: + showIndent(outfile, level) + outfile.write('<%stitle>%s\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) + for basecompoundref_ in self.basecompoundref: + basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref') + for derivedcompoundref_ in self.derivedcompoundref: + derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref') + for includes_ in self.includes: + includes_.export(outfile, level, namespace_, name_='includes') + for includedby_ in self.includedby: + includedby_.export(outfile, level, namespace_, name_='includedby') + if self.incdepgraph: + self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph') + if self.invincdepgraph: + self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph') + for innerdir_ in self.innerdir: + innerdir_.export(outfile, level, namespace_, name_='innerdir') + for innerfile_ in self.innerfile: + innerfile_.export(outfile, level, namespace_, name_='innerfile') + for innerclass_ in self.innerclass: + innerclass_.export(outfile, level, namespace_, name_='innerclass') + for innernamespace_ in self.innernamespace: + innernamespace_.export(outfile, level, namespace_, name_='innernamespace') + for innerpage_ in self.innerpage: + innerpage_.export(outfile, level, namespace_, name_='innerpage') + for innergroup_ in self.innergroup: + innergroup_.export(outfile, level, namespace_, name_='innergroup') + if self.templateparamlist: + self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') + for sectiondef_ in self.sectiondef: + sectiondef_.export(outfile, level, namespace_, name_='sectiondef') + if self.briefdescription: + self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + if self.detaileddescription: + self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') + if self.inheritancegraph: + self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph') + if self.collaborationgraph: + self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph') + if self.programlisting: + self.programlisting.export(outfile, level, namespace_, name_='programlisting') + if self.location: + self.location.export(outfile, level, namespace_, 
name_='location') + if self.listofallmembers: + self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers') + def hasContent_(self): + if ( + self.compoundname is not None or + self.title is not None or + self.basecompoundref is not None or + self.derivedcompoundref is not None or + self.includes is not None or + self.includedby is not None or + self.incdepgraph is not None or + self.invincdepgraph is not None or + self.innerdir is not None or + self.innerfile is not None or + self.innerclass is not None or + self.innernamespace is not None or + self.innerpage is not None or + self.innergroup is not None or + self.templateparamlist is not None or + self.sectiondef is not None or + self.briefdescription is not None or + self.detaileddescription is not None or + self.inheritancegraph is not None or + self.collaborationgraph is not None or + self.programlisting is not None or + self.location is not None or + self.listofallmembers is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='compounddefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding)) + if self.title: + showIndent(outfile, level) + outfile.write('title=model_.xsd_string(\n') + self.title.exportLiteral(outfile, level, name_='title') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('basecompoundref=[\n') + level += 1 + for basecompoundref in self.basecompoundref: + showIndent(outfile, level) + outfile.write('model_.basecompoundref(\n') + basecompoundref.exportLiteral(outfile, level, name_='basecompoundref') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('derivedcompoundref=[\n') + level += 1 + for derivedcompoundref in self.derivedcompoundref: + showIndent(outfile, level) + outfile.write('model_.derivedcompoundref(\n') + derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('includes=[\n') + level += 1 + for includes in self.includes: + showIndent(outfile, level) + outfile.write('model_.includes(\n') + includes.exportLiteral(outfile, level, name_='includes') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('includedby=[\n') + level += 1 + for includedby in self.includedby: + showIndent(outfile, level) + outfile.write('model_.includedby(\n') + includedby.exportLiteral(outfile, level, name_='includedby') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.incdepgraph: + showIndent(outfile, 
level) + outfile.write('incdepgraph=model_.graphType(\n') + self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph') + showIndent(outfile, level) + outfile.write('),\n') + if self.invincdepgraph: + showIndent(outfile, level) + outfile.write('invincdepgraph=model_.graphType(\n') + self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('innerdir=[\n') + level += 1 + for innerdir in self.innerdir: + showIndent(outfile, level) + outfile.write('model_.innerdir(\n') + innerdir.exportLiteral(outfile, level, name_='innerdir') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innerfile=[\n') + level += 1 + for innerfile in self.innerfile: + showIndent(outfile, level) + outfile.write('model_.innerfile(\n') + innerfile.exportLiteral(outfile, level, name_='innerfile') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innerclass=[\n') + level += 1 + for innerclass in self.innerclass: + showIndent(outfile, level) + outfile.write('model_.innerclass(\n') + innerclass.exportLiteral(outfile, level, name_='innerclass') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innernamespace=[\n') + level += 1 + for innernamespace in self.innernamespace: + showIndent(outfile, level) + outfile.write('model_.innernamespace(\n') + innernamespace.exportLiteral(outfile, level, name_='innernamespace') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innerpage=[\n') + level += 1 + for innerpage in self.innerpage: + showIndent(outfile, level) + outfile.write('model_.innerpage(\n') + innerpage.exportLiteral(outfile, level, name_='innerpage') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innergroup=[\n') + level += 1 + for innergroup in self.innergroup: + showIndent(outfile, level) + outfile.write('model_.innergroup(\n') + innergroup.exportLiteral(outfile, level, name_='innergroup') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.templateparamlist: + showIndent(outfile, level) + outfile.write('templateparamlist=model_.templateparamlistType(\n') + self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('sectiondef=[\n') + level += 1 + for sectiondef in self.sectiondef: + showIndent(outfile, level) + outfile.write('model_.sectiondef(\n') + sectiondef.exportLiteral(outfile, level, name_='sectiondef') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.briefdescription: + showIndent(outfile, level) + outfile.write('briefdescription=model_.descriptionType(\n') + self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.detaileddescription: + showIndent(outfile, level) + 
outfile.write('detaileddescription=model_.descriptionType(\n') + self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.inheritancegraph: + showIndent(outfile, level) + outfile.write('inheritancegraph=model_.graphType(\n') + self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph') + showIndent(outfile, level) + outfile.write('),\n') + if self.collaborationgraph: + showIndent(outfile, level) + outfile.write('collaborationgraph=model_.graphType(\n') + self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph') + showIndent(outfile, level) + outfile.write('),\n') + if self.programlisting: + showIndent(outfile, level) + outfile.write('programlisting=model_.listingType(\n') + self.programlisting.exportLiteral(outfile, level, name_='programlisting') + showIndent(outfile, level) + outfile.write('),\n') + if self.location: + showIndent(outfile, level) + outfile.write('location=model_.locationType(\n') + self.location.exportLiteral(outfile, level, name_='location') + showIndent(outfile, level) + outfile.write('),\n') + if self.listofallmembers: + showIndent(outfile, level) + outfile.write('listofallmembers=model_.listofallmembersType(\n') + self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'compoundname': + compoundname_ = '' + for text__content_ in child_.childNodes: + compoundname_ += text__content_.nodeValue + self.compoundname = compoundname_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + obj_ = docTitleType.factory() + obj_.build(child_) + self.set_title(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'basecompoundref': + obj_ = compoundRefType.factory() + obj_.build(child_) + self.basecompoundref.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'derivedcompoundref': + obj_ = compoundRefType.factory() + obj_.build(child_) + self.derivedcompoundref.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'includes': + obj_ = incType.factory() + obj_.build(child_) + self.includes.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'includedby': + obj_ = incType.factory() + obj_.build(child_) + self.includedby.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'incdepgraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_incdepgraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'invincdepgraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_invincdepgraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerdir': + obj_ = refType.factory() + obj_.build(child_) + self.innerdir.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerfile': + obj_ = refType.factory() + obj_.build(child_) + 
self.innerfile.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerclass': + obj_ = refType.factory() + obj_.build(child_) + self.innerclass.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innernamespace': + obj_ = refType.factory() + obj_.build(child_) + self.innernamespace.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerpage': + obj_ = refType.factory() + obj_.build(child_) + self.innerpage.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innergroup': + obj_ = refType.factory() + obj_.build(child_) + self.innergroup.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'templateparamlist': + obj_ = templateparamlistType.factory() + obj_.build(child_) + self.set_templateparamlist(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sectiondef': + obj_ = sectiondefType.factory() + obj_.build(child_) + self.sectiondef.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_briefdescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'detaileddescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_detaileddescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'inheritancegraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_inheritancegraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'collaborationgraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_collaborationgraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'programlisting': + obj_ = listingType.factory() + obj_.build(child_) + self.set_programlisting(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'location': + obj_ = locationType.factory() + obj_.build(child_) + self.set_location(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'listofallmembers': + obj_ = listofallmembersType.factory() + obj_.build(child_) + self.set_listofallmembers(obj_) +# end class compounddefType + + +class listofallmembersType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, member=None): + if member is None: + self.member = [] + else: + self.member = member + def factory(*args_, **kwargs_): + if listofallmembersType.subclass: + return listofallmembersType.subclass(*args_, **kwargs_) + else: + return listofallmembersType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_member(self): return self.member + def set_member(self, member): self.member = member + def add_member(self, value): self.member.append(value) + def insert_member(self, index, value): self.member[index] = value + def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'): + pass + def exportChildren(self, outfile, level, namespace_='', 
name_='listofallmembersType'): + for member_ in self.member: + member_.export(outfile, level, namespace_, name_='member') + def hasContent_(self): + if ( + self.member is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='listofallmembersType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('member=[\n') + level += 1 + for member in self.member: + showIndent(outfile, level) + outfile.write('model_.member(\n') + member.exportLiteral(outfile, level, name_='member') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'member': + obj_ = memberRefType.factory() + obj_.build(child_) + self.member.append(obj_) +# end class listofallmembersType + + +class memberRefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None): + self.virt = virt + self.prot = prot + self.refid = refid + self.ambiguityscope = ambiguityscope + self.scope = scope + self.name = name + def factory(*args_, **kwargs_): + if memberRefType.subclass: + return memberRefType.subclass(*args_, **kwargs_) + else: + return memberRefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_scope(self): return self.scope + def set_scope(self, scope): self.scope = scope + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_virt(self): return self.virt + def set_virt(self, virt): self.virt = virt + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_ambiguityscope(self): return self.ambiguityscope + def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope + def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='memberRefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'): + if self.virt is not None: + outfile.write(' virt=%s' % (quote_attrib(self.virt), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.ambiguityscope is not None: + outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), )) 
+ def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'): + if self.scope is not None: + showIndent(outfile, level) + outfile.write('<%sscope>%s\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_)) + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + def hasContent_(self): + if ( + self.scope is not None or + self.name is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='memberRefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.virt is not None: + showIndent(outfile, level) + outfile.write('virt = "%s",\n' % (self.virt,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.ambiguityscope is not None: + showIndent(outfile, level) + outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('virt'): + self.virt = attrs.get('virt').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('ambiguityscope'): + self.ambiguityscope = attrs.get('ambiguityscope').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'scope': + scope_ = '' + for text__content_ in child_.childNodes: + scope_ += text__content_.nodeValue + self.scope = scope_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ +# end class memberRefType + + +class scope(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if scope.subclass: + return scope.subclass(*args_, **kwargs_) + else: + return scope(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='scope') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='scope'): + pass + def 
exportChildren(self, outfile, level, namespace_='', name_='scope'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='scope'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class scope + + +class name(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if name.subclass: + return name.subclass(*args_, **kwargs_) + else: + return name(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='name') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='name'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='name'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='name'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class name + + +class 
compoundRefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.virt = virt + self.prot = prot + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if compoundRefType.subclass: + return compoundRefType.subclass(*args_, **kwargs_) + else: + return compoundRefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_virt(self): return self.virt + def set_virt(self, virt): self.virt = virt + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='compoundRefType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'): + if self.virt is not None: + outfile.write(' virt=%s' % (quote_attrib(self.virt), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='compoundRefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.virt is not None: + showIndent(outfile, level) + outfile.write('virt = "%s",\n' % (self.virt,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('virt'): + self.virt = attrs.get('virt').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + 
self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class compoundRefType + + +class reimplementType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if reimplementType.subclass: + return reimplementType.subclass(*args_, **kwargs_) + else: + return reimplementType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='reimplementType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='reimplementType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class reimplementType + + +class incType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.local = local + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = 
MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if incType.subclass: + return incType.subclass(*args_, **kwargs_) + else: + return incType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_local(self): return self.local + def set_local(self, local): self.local = local + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='incType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='incType'): + if self.local is not None: + outfile.write(' local=%s' % (quote_attrib(self.local), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='incType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='incType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.local is not None: + showIndent(outfile, level) + outfile.write('local = "%s",\n' % (self.local,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('local'): + self.local = attrs.get('local').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class incType + + +class refType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.prot = prot + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if refType.subclass: + 
return refType.subclass(*args_, **kwargs_) + else: + return refType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='refType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='refType'): + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='refType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='refType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class refType + + +class refTextType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): + self.refid = refid + self.kindref = kindref + self.external = external + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if refTextType.subclass: + return refTextType.subclass(*args_, **kwargs_) + else: + return refTextType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): 
return self.refid + def set_refid(self, refid): self.refid = refid + def get_kindref(self): return self.kindref + def set_kindref(self, kindref): self.kindref = kindref + def get_external(self): return self.external + def set_external(self, external): self.external = external + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='refTextType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.kindref is not None: + outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) + if self.external is not None: + outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='refTextType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='refTextType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.kindref is not None: + showIndent(outfile, level) + outfile.write('kindref = "%s",\n' % (self.kindref,)) + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('kindref'): + self.kindref = attrs.get('kindref').value + if attrs.get('external'): + self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class refTextType + + +class sectiondefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, header=None, description=None, memberdef=None): + self.kind = kind + self.header = header + self.description = description + if memberdef is None: + 
self.memberdef = [] + else: + self.memberdef = memberdef + def factory(*args_, **kwargs_): + if sectiondefType.subclass: + return sectiondefType.subclass(*args_, **kwargs_) + else: + return sectiondefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_header(self): return self.header + def set_header(self, header): self.header = header + def get_description(self): return self.description + def set_description(self, description): self.description = description + def get_memberdef(self): return self.memberdef + def set_memberdef(self, memberdef): self.memberdef = memberdef + def add_memberdef(self, value): self.memberdef.append(value) + def insert_memberdef(self, index, value): self.memberdef[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='sectiondefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'): + if self.header is not None: + showIndent(outfile, level) + outfile.write('<%sheader>%s\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_)) + if self.description: + self.description.export(outfile, level, namespace_, name_='description') + for memberdef_ in self.memberdef: + memberdef_.export(outfile, level, namespace_, name_='memberdef') + def hasContent_(self): + if ( + self.header is not None or + self.description is not None or + self.memberdef is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='sectiondefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding)) + if self.description: + showIndent(outfile, level) + outfile.write('description=model_.descriptionType(\n') + self.description.exportLiteral(outfile, level, name_='description') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('memberdef=[\n') + level += 1 + for memberdef in self.memberdef: + showIndent(outfile, level) + outfile.write('model_.memberdef(\n') + memberdef.exportLiteral(outfile, level, name_='memberdef') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = 
attrs.get('kind').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'header': + header_ = '' + for text__content_ in child_.childNodes: + header_ += text__content_.nodeValue + self.header = header_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'description': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_description(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'memberdef': + obj_ = memberdefType.factory() + obj_.build(child_) + self.memberdef.append(obj_) +# end class sectiondefType + + +class memberdefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): + self.initonly = initonly + self.kind = kind + self.volatile = volatile + self.const = const + self.raisexx = raisexx + self.virt = virt + self.readable = readable + self.prot = prot + self.explicit = explicit + self.new = new + self.final = final + self.writable = writable + self.add = add + self.static = static + self.remove = remove + self.sealed = sealed + self.mutable = mutable + self.gettable = gettable + self.inline = inline + self.settable = settable + self.id = id + self.templateparamlist = templateparamlist + self.type_ = type_ + self.definition = definition + self.argsstring = argsstring + self.name = name + self.read = read + self.write = write + self.bitfield = bitfield + if reimplements is None: + self.reimplements = [] + else: + self.reimplements = reimplements + if reimplementedby is None: + self.reimplementedby = [] + else: + self.reimplementedby = reimplementedby + if param is None: + self.param = [] + else: + self.param = param + if enumvalue is None: + self.enumvalue = [] + else: + self.enumvalue = enumvalue + self.initializer = initializer + self.exceptions = exceptions + self.briefdescription = briefdescription + self.detaileddescription = detaileddescription + self.inbodydescription = inbodydescription + self.location = location + if references is None: + self.references = [] + else: + self.references = references + if referencedby is None: + self.referencedby = [] + else: + self.referencedby = referencedby + def factory(*args_, **kwargs_): + if memberdefType.subclass: + return memberdefType.subclass(*args_, **kwargs_) + else: + return memberdefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_templateparamlist(self): return self.templateparamlist + def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + def get_definition(self): return self.definition + def set_definition(self, definition): self.definition = definition + def get_argsstring(self): return self.argsstring + def set_argsstring(self, argsstring): self.argsstring = argsstring + def get_name(self): return self.name + def set_name(self, name): 
self.name = name + def get_read(self): return self.read + def set_read(self, read): self.read = read + def get_write(self): return self.write + def set_write(self, write): self.write = write + def get_bitfield(self): return self.bitfield + def set_bitfield(self, bitfield): self.bitfield = bitfield + def get_reimplements(self): return self.reimplements + def set_reimplements(self, reimplements): self.reimplements = reimplements + def add_reimplements(self, value): self.reimplements.append(value) + def insert_reimplements(self, index, value): self.reimplements[index] = value + def get_reimplementedby(self): return self.reimplementedby + def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby + def add_reimplementedby(self, value): self.reimplementedby.append(value) + def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value + def get_param(self): return self.param + def set_param(self, param): self.param = param + def add_param(self, value): self.param.append(value) + def insert_param(self, index, value): self.param[index] = value + def get_enumvalue(self): return self.enumvalue + def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue + def add_enumvalue(self, value): self.enumvalue.append(value) + def insert_enumvalue(self, index, value): self.enumvalue[index] = value + def get_initializer(self): return self.initializer + def set_initializer(self, initializer): self.initializer = initializer + def get_exceptions(self): return self.exceptions + def set_exceptions(self, exceptions): self.exceptions = exceptions + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription + def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def get_inbodydescription(self): return self.inbodydescription + def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription + def get_location(self): return self.location + def set_location(self, location): self.location = location + def get_references(self): return self.references + def set_references(self, references): self.references = references + def add_references(self, value): self.references.append(value) + def insert_references(self, index, value): self.references[index] = value + def get_referencedby(self): return self.referencedby + def set_referencedby(self, referencedby): self.referencedby = referencedby + def add_referencedby(self, value): self.referencedby.append(value) + def insert_referencedby(self, index, value): self.referencedby[index] = value + def get_initonly(self): return self.initonly + def set_initonly(self, initonly): self.initonly = initonly + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_volatile(self): return self.volatile + def set_volatile(self, volatile): self.volatile = volatile + def get_const(self): return self.const + def set_const(self, const): self.const = const + def get_raise(self): return self.raisexx + def set_raise(self, raisexx): self.raisexx = raisexx + def get_virt(self): return self.virt + def set_virt(self, virt): self.virt = virt + def get_readable(self): return self.readable + def set_readable(self, readable): self.readable = readable + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_explicit(self): return self.explicit + 
def set_explicit(self, explicit): self.explicit = explicit + def get_new(self): return self.new + def set_new(self, new): self.new = new + def get_final(self): return self.final + def set_final(self, final): self.final = final + def get_writable(self): return self.writable + def set_writable(self, writable): self.writable = writable + def get_add(self): return self.add + def set_add(self, add): self.add = add + def get_static(self): return self.static + def set_static(self, static): self.static = static + def get_remove(self): return self.remove + def set_remove(self, remove): self.remove = remove + def get_sealed(self): return self.sealed + def set_sealed(self, sealed): self.sealed = sealed + def get_mutable(self): return self.mutable + def set_mutable(self, mutable): self.mutable = mutable + def get_gettable(self): return self.gettable + def set_gettable(self, gettable): self.gettable = gettable + def get_inline(self): return self.inline + def set_inline(self, inline): self.inline = inline + def get_settable(self): return self.settable + def set_settable(self, settable): self.settable = settable + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='memberdefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'): + if self.initonly is not None: + outfile.write(' initonly=%s' % (quote_attrib(self.initonly), )) + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + if self.volatile is not None: + outfile.write(' volatile=%s' % (quote_attrib(self.volatile), )) + if self.const is not None: + outfile.write(' const=%s' % (quote_attrib(self.const), )) + if self.raisexx is not None: + outfile.write(' raise=%s' % (quote_attrib(self.raisexx), )) + if self.virt is not None: + outfile.write(' virt=%s' % (quote_attrib(self.virt), )) + if self.readable is not None: + outfile.write(' readable=%s' % (quote_attrib(self.readable), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.explicit is not None: + outfile.write(' explicit=%s' % (quote_attrib(self.explicit), )) + if self.new is not None: + outfile.write(' new=%s' % (quote_attrib(self.new), )) + if self.final is not None: + outfile.write(' final=%s' % (quote_attrib(self.final), )) + if self.writable is not None: + outfile.write(' writable=%s' % (quote_attrib(self.writable), )) + if self.add is not None: + outfile.write(' add=%s' % (quote_attrib(self.add), )) + if self.static is not None: + outfile.write(' static=%s' % (quote_attrib(self.static), )) + if self.remove is not None: + outfile.write(' remove=%s' % (quote_attrib(self.remove), )) + if self.sealed is not None: + outfile.write(' sealed=%s' % (quote_attrib(self.sealed), )) + if self.mutable is not None: + outfile.write(' mutable=%s' % (quote_attrib(self.mutable), )) + if self.gettable is not None: + outfile.write(' gettable=%s' % (quote_attrib(self.gettable), )) + if self.inline is not None: + outfile.write(' inline=%s' % (quote_attrib(self.inline), )) + if self.settable is not None: + outfile.write(' 
settable=%s' % (quote_attrib(self.settable), )) + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'): + if self.templateparamlist: + self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') + if self.type_: + self.type_.export(outfile, level, namespace_, name_='type') + if self.definition is not None: + showIndent(outfile, level) + outfile.write('<%sdefinition>%s\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) + if self.argsstring is not None: + showIndent(outfile, level) + outfile.write('<%sargsstring>%s\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + if self.read is not None: + showIndent(outfile, level) + outfile.write('<%sread>%s\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) + if self.write is not None: + showIndent(outfile, level) + outfile.write('<%swrite>%s\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) + if self.bitfield is not None: + showIndent(outfile, level) + outfile.write('<%sbitfield>%s\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) + for reimplements_ in self.reimplements: + reimplements_.export(outfile, level, namespace_, name_='reimplements') + for reimplementedby_ in self.reimplementedby: + reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby') + for param_ in self.param: + param_.export(outfile, level, namespace_, name_='param') + for enumvalue_ in self.enumvalue: + enumvalue_.export(outfile, level, namespace_, name_='enumvalue') + if self.initializer: + self.initializer.export(outfile, level, namespace_, name_='initializer') + if self.exceptions: + self.exceptions.export(outfile, level, namespace_, name_='exceptions') + if self.briefdescription: + self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + if self.detaileddescription: + self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') + if self.inbodydescription: + self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription') + if self.location: + self.location.export(outfile, level, namespace_, name_='location', ) + for references_ in self.references: + references_.export(outfile, level, namespace_, name_='references') + for referencedby_ in self.referencedby: + referencedby_.export(outfile, level, namespace_, name_='referencedby') + def hasContent_(self): + if ( + self.templateparamlist is not None or + self.type_ is not None or + self.definition is not None or + self.argsstring is not None or + self.name is not None or + self.read is not None or + self.write is not None or + self.bitfield is not None or + self.reimplements is not None or + self.reimplementedby is not None or + self.param is not None or + self.enumvalue is not None or + self.initializer is not None or + self.exceptions is not None or + self.briefdescription is not None or 
+ self.detaileddescription is not None or + self.inbodydescription is not None or + self.location is not None or + self.references is not None or + self.referencedby is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='memberdefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.initonly is not None: + showIndent(outfile, level) + outfile.write('initonly = "%s",\n' % (self.initonly,)) + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.volatile is not None: + showIndent(outfile, level) + outfile.write('volatile = "%s",\n' % (self.volatile,)) + if self.const is not None: + showIndent(outfile, level) + outfile.write('const = "%s",\n' % (self.const,)) + if self.raisexx is not None: + showIndent(outfile, level) + outfile.write('raisexx = "%s",\n' % (self.raisexx,)) + if self.virt is not None: + showIndent(outfile, level) + outfile.write('virt = "%s",\n' % (self.virt,)) + if self.readable is not None: + showIndent(outfile, level) + outfile.write('readable = "%s",\n' % (self.readable,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.explicit is not None: + showIndent(outfile, level) + outfile.write('explicit = "%s",\n' % (self.explicit,)) + if self.new is not None: + showIndent(outfile, level) + outfile.write('new = "%s",\n' % (self.new,)) + if self.final is not None: + showIndent(outfile, level) + outfile.write('final = "%s",\n' % (self.final,)) + if self.writable is not None: + showIndent(outfile, level) + outfile.write('writable = "%s",\n' % (self.writable,)) + if self.add is not None: + showIndent(outfile, level) + outfile.write('add = "%s",\n' % (self.add,)) + if self.static is not None: + showIndent(outfile, level) + outfile.write('static = "%s",\n' % (self.static,)) + if self.remove is not None: + showIndent(outfile, level) + outfile.write('remove = "%s",\n' % (self.remove,)) + if self.sealed is not None: + showIndent(outfile, level) + outfile.write('sealed = "%s",\n' % (self.sealed,)) + if self.mutable is not None: + showIndent(outfile, level) + outfile.write('mutable = "%s",\n' % (self.mutable,)) + if self.gettable is not None: + showIndent(outfile, level) + outfile.write('gettable = "%s",\n' % (self.gettable,)) + if self.inline is not None: + showIndent(outfile, level) + outfile.write('inline = "%s",\n' % (self.inline,)) + if self.settable is not None: + showIndent(outfile, level) + outfile.write('settable = "%s",\n' % (self.settable,)) + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + if self.templateparamlist: + showIndent(outfile, level) + outfile.write('templateparamlist=model_.templateparamlistType(\n') + self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') + showIndent(outfile, level) + outfile.write('),\n') + if self.type_: + showIndent(outfile, level) + outfile.write('type_=model_.linkedTextType(\n') + self.type_.exportLiteral(outfile, level, name_='type') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('argsstring=%s,\n' % 
quote_python(self.argsstring).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('reimplements=[\n') + level += 1 + for reimplements in self.reimplements: + showIndent(outfile, level) + outfile.write('model_.reimplements(\n') + reimplements.exportLiteral(outfile, level, name_='reimplements') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('reimplementedby=[\n') + level += 1 + for reimplementedby in self.reimplementedby: + showIndent(outfile, level) + outfile.write('model_.reimplementedby(\n') + reimplementedby.exportLiteral(outfile, level, name_='reimplementedby') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('param=[\n') + level += 1 + for param in self.param: + showIndent(outfile, level) + outfile.write('model_.param(\n') + param.exportLiteral(outfile, level, name_='param') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('enumvalue=[\n') + level += 1 + for enumvalue in self.enumvalue: + showIndent(outfile, level) + outfile.write('model_.enumvalue(\n') + enumvalue.exportLiteral(outfile, level, name_='enumvalue') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.initializer: + showIndent(outfile, level) + outfile.write('initializer=model_.linkedTextType(\n') + self.initializer.exportLiteral(outfile, level, name_='initializer') + showIndent(outfile, level) + outfile.write('),\n') + if self.exceptions: + showIndent(outfile, level) + outfile.write('exceptions=model_.linkedTextType(\n') + self.exceptions.exportLiteral(outfile, level, name_='exceptions') + showIndent(outfile, level) + outfile.write('),\n') + if self.briefdescription: + showIndent(outfile, level) + outfile.write('briefdescription=model_.descriptionType(\n') + self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.detaileddescription: + showIndent(outfile, level) + outfile.write('detaileddescription=model_.descriptionType(\n') + self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.inbodydescription: + showIndent(outfile, level) + outfile.write('inbodydescription=model_.descriptionType(\n') + self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.location: + showIndent(outfile, level) + outfile.write('location=model_.locationType(\n') + self.location.exportLiteral(outfile, level, name_='location') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('references=[\n') + level += 1 + for references in self.references: + showIndent(outfile, 
level) + outfile.write('model_.references(\n') + references.exportLiteral(outfile, level, name_='references') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('referencedby=[\n') + level += 1 + for referencedby in self.referencedby: + showIndent(outfile, level) + outfile.write('model_.referencedby(\n') + referencedby.exportLiteral(outfile, level, name_='referencedby') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('initonly'): + self.initonly = attrs.get('initonly').value + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('volatile'): + self.volatile = attrs.get('volatile').value + if attrs.get('const'): + self.const = attrs.get('const').value + if attrs.get('raise'): + self.raisexx = attrs.get('raise').value + if attrs.get('virt'): + self.virt = attrs.get('virt').value + if attrs.get('readable'): + self.readable = attrs.get('readable').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('explicit'): + self.explicit = attrs.get('explicit').value + if attrs.get('new'): + self.new = attrs.get('new').value + if attrs.get('final'): + self.final = attrs.get('final').value + if attrs.get('writable'): + self.writable = attrs.get('writable').value + if attrs.get('add'): + self.add = attrs.get('add').value + if attrs.get('static'): + self.static = attrs.get('static').value + if attrs.get('remove'): + self.remove = attrs.get('remove').value + if attrs.get('sealed'): + self.sealed = attrs.get('sealed').value + if attrs.get('mutable'): + self.mutable = attrs.get('mutable').value + if attrs.get('gettable'): + self.gettable = attrs.get('gettable').value + if attrs.get('inline'): + self.inline = attrs.get('inline').value + if attrs.get('settable'): + self.settable = attrs.get('settable').value + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'templateparamlist': + obj_ = templateparamlistType.factory() + obj_.build(child_) + self.set_templateparamlist(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'type': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_type(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'definition': + definition_ = '' + for text__content_ in child_.childNodes: + definition_ += text__content_.nodeValue + self.definition = definition_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'argsstring': + argsstring_ = '' + for text__content_ in child_.childNodes: + argsstring_ += text__content_.nodeValue + self.argsstring = argsstring_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'read': + read_ = '' + for text__content_ in child_.childNodes: + read_ += text__content_.nodeValue + self.read = read_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'write': + write_ = '' + for text__content_ in 
child_.childNodes: + write_ += text__content_.nodeValue + self.write = write_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'bitfield': + bitfield_ = '' + for text__content_ in child_.childNodes: + bitfield_ += text__content_.nodeValue + self.bitfield = bitfield_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'reimplements': + obj_ = reimplementType.factory() + obj_.build(child_) + self.reimplements.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'reimplementedby': + obj_ = reimplementType.factory() + obj_.build(child_) + self.reimplementedby.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'param': + obj_ = paramType.factory() + obj_.build(child_) + self.param.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'enumvalue': + obj_ = enumvalueType.factory() + obj_.build(child_) + self.enumvalue.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'initializer': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_initializer(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'exceptions': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_exceptions(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_briefdescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'detaileddescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_detaileddescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'inbodydescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_inbodydescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'location': + obj_ = locationType.factory() + obj_.build(child_) + self.set_location(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'references': + obj_ = referenceType.factory() + obj_.build(child_) + self.references.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'referencedby': + obj_ = referenceType.factory() + obj_.build(child_) + self.referencedby.append(obj_) +# end class memberdefType + + +class definition(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if definition.subclass: + return definition.subclass(*args_, **kwargs_) + else: + return definition(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='definition') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='definition'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='definition'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + 
            outfile.write(quote_xml('%s' % self.valueOf_))
+    def hasContent_(self):
+        if (
+            self.valueOf_ is not None
+            ):
+            return True
+        else:
+            return False
+    def exportLiteral(self, outfile, level, name_='definition'):
+        level += 1
+        self.exportLiteralAttributes(outfile, level, name_)
+        if self.hasContent_():
+            self.exportLiteralChildren(outfile, level, name_)
+    def exportLiteralAttributes(self, outfile, level, name_):
+        pass
+    def exportLiteralChildren(self, outfile, level, name_):
+        showIndent(outfile, level)
+        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+    def build(self, node_):
+        attrs = node_.attributes
+        self.buildAttributes(attrs)
+        self.valueOf_ = ''
+        for child_ in node_.childNodes:
+            nodeName_ = child_.nodeName.split(':')[-1]
+            self.buildChildren(child_, nodeName_)
+    def buildAttributes(self, attrs):
+        pass
+    def buildChildren(self, child_, nodeName_):
+        if child_.nodeType == Node.TEXT_NODE:
+            self.valueOf_ += child_.nodeValue
+        elif child_.nodeType == Node.CDATA_SECTION_NODE:
+            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+# end class definition
+
+
+class argsstring(GeneratedsSuper):
+    subclass = None
+    superclass = None
+    def __init__(self, valueOf_=''):
+        self.valueOf_ = valueOf_
+    def factory(*args_, **kwargs_):
+        if argsstring.subclass:
+            return argsstring.subclass(*args_, **kwargs_)
+        else:
+            return argsstring(*args_, **kwargs_)
+    factory = staticmethod(factory)
+    def getValueOf_(self): return self.valueOf_
+    def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+    def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
+        showIndent(outfile, level)
+        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+        self.exportAttributes(outfile, level, namespace_, name_='argsstring')
+        if self.hasContent_():
+            outfile.write('>\n')
+            self.exportChildren(outfile, level + 1, namespace_, name_)
+            showIndent(outfile, level)
+            outfile.write('</%s%s>\n' % (namespace_, name_))
+        else:
+            outfile.write(' />\n')
+    def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
+        pass
+    def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
+        if self.valueOf_.find('![CDATA')>-1:
+            value=quote_xml('%s' % self.valueOf_)
+            value=value.replace('![CDATA','<![CDATA')
+            value=value.replace(']]',']]>')
+            outfile.write(value)
+        else:
+            outfile.write(quote_xml('%s' % self.valueOf_))
+    def hasContent_(self):
+        if (
+            self.valueOf_ is not None
+            ):
+            return True
+        else:
+            return False
+    def exportLiteral(self, outfile, level, name_='argsstring'):
+        level += 1
+        self.exportLiteralAttributes(outfile, level, name_)
+        if self.hasContent_():
+            self.exportLiteralChildren(outfile, level, name_)
+    def exportLiteralAttributes(self, outfile, level, name_):
+        pass
+    def exportLiteralChildren(self, outfile, level, name_):
+        showIndent(outfile, level)
+        outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+    def build(self, node_):
+        attrs = node_.attributes
+        self.buildAttributes(attrs)
+        self.valueOf_ = ''
+        for child_ in node_.childNodes:
+            nodeName_ = child_.nodeName.split(':')[-1]
+            self.buildChildren(child_, nodeName_)
+    def buildAttributes(self, attrs):
+        pass
+    def buildChildren(self, child_, nodeName_):
+        if child_.nodeType == Node.TEXT_NODE:
+            self.valueOf_ += child_.nodeValue
+        elif child_.nodeType == Node.CDATA_SECTION_NODE:
+            self.valueOf_ += '<![CDATA['+child_.nodeValue+']]>'
+# end class argsstring
+
+
+class read(GeneratedsSuper):
+    subclass = None
+    superclass = None
+    def __init__(self, valueOf_=''):
+        self.valueOf_ = valueOf_
+    def factory(*args_, **kwargs_):
+ if read.subclass: + return read.subclass(*args_, **kwargs_) + else: + return read(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='read') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='read'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='read'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='read'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class read + + +class write(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if write.subclass: + return write.subclass(*args_, **kwargs_) + else: + return write(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='write') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='write'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='write'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='write'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if 
self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class write + + +class bitfield(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if bitfield.subclass: + return bitfield.subclass(*args_, **kwargs_) + else: + return bitfield(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='bitfield') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='bitfield'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='bitfield'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class bitfield + + +class descriptionType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if descriptionType.subclass: + return descriptionType.subclass(*args_, 
**kwargs_) + else: + return descriptionType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect1(self): return self.sect1 + def set_sect1(self, sect1): self.sect1 = sect1 + def add_sect1(self, value): self.sect1.append(value) + def insert_sect1(self, index, value): self.sect1[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='descriptionType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect1 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='descriptionType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect1': + 
childobj_ = docSect1Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect1', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class descriptionType + + +class enumvalueType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): + self.prot = prot + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if enumvalueType.subclass: + return enumvalueType.subclass(*args_, **kwargs_) + else: + return enumvalueType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_initializer(self): return self.initializer + def set_initializer(self, initializer): self.initializer = initializer + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription + def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='enumvalueType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'): + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.name is not None or + self.initializer is not None or + self.briefdescription is not None or + self.detaileddescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='enumvalueType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.id is not None: + showIndent(outfile, level) + outfile.write('id 
= %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + value_ = [] + for text_ in child_.childNodes: + value_.append(text_.nodeValue) + valuestr_ = ''.join(value_) + obj_ = self.mixedclass_(MixedContainer.CategorySimple, + MixedContainer.TypeString, 'name', valuestr_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'initializer': + childobj_ = linkedTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'initializer', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + childobj_ = descriptionType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'briefdescription', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'detaileddescription': + childobj_ = descriptionType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'detaileddescription', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class enumvalueType + + +class templateparamlistType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, param=None): + if param is None: + self.param = [] + else: + self.param = param + def factory(*args_, **kwargs_): + if templateparamlistType.subclass: + return templateparamlistType.subclass(*args_, **kwargs_) + else: + return templateparamlistType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_param(self): return self.param + def set_param(self, param): self.param = param + def add_param(self, value): self.param.append(value) + def insert_param(self, index, value): self.param[index] = value + def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType') + if 
self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'): + for param_ in self.param: + param_.export(outfile, level, namespace_, name_='param') + def hasContent_(self): + if ( + self.param is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='templateparamlistType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('param=[\n') + level += 1 + for param in self.param: + showIndent(outfile, level) + outfile.write('model_.param(\n') + param.exportLiteral(outfile, level, name_='param') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'param': + obj_ = paramType.factory() + obj_.build(child_) + self.param.append(obj_) +# end class templateparamlistType + + +class paramType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None): + self.type_ = type_ + self.declname = declname + self.defname = defname + self.array = array + self.defval = defval + self.briefdescription = briefdescription + def factory(*args_, **kwargs_): + if paramType.subclass: + return paramType.subclass(*args_, **kwargs_) + else: + return paramType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + def get_declname(self): return self.declname + def set_declname(self, declname): self.declname = declname + def get_defname(self): return self.defname + def set_defname(self, defname): self.defname = defname + def get_array(self): return self.array + def set_array(self, array): self.array = array + def get_defval(self): return self.defval + def set_defval(self, defval): self.defval = defval + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='paramType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='paramType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='paramType'): + if 
self.type_: + self.type_.export(outfile, level, namespace_, name_='type') + if self.declname is not None: + showIndent(outfile, level) + outfile.write('<%sdeclname>%s\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) + if self.defname is not None: + showIndent(outfile, level) + outfile.write('<%sdefname>%s\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) + if self.array is not None: + showIndent(outfile, level) + outfile.write('<%sarray>%s\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) + if self.defval: + self.defval.export(outfile, level, namespace_, name_='defval') + if self.briefdescription: + self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + def hasContent_(self): + if ( + self.type_ is not None or + self.declname is not None or + self.defname is not None or + self.array is not None or + self.defval is not None or + self.briefdescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='paramType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + if self.type_: + showIndent(outfile, level) + outfile.write('type_=model_.linkedTextType(\n') + self.type_.exportLiteral(outfile, level, name_='type') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding)) + if self.defval: + showIndent(outfile, level) + outfile.write('defval=model_.linkedTextType(\n') + self.defval.exportLiteral(outfile, level, name_='defval') + showIndent(outfile, level) + outfile.write('),\n') + if self.briefdescription: + showIndent(outfile, level) + outfile.write('briefdescription=model_.descriptionType(\n') + self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'type': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_type(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'declname': + declname_ = '' + for text__content_ in child_.childNodes: + declname_ += text__content_.nodeValue + self.declname = declname_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'defname': + defname_ = '' + for text__content_ in child_.childNodes: + defname_ += text__content_.nodeValue + self.defname = defname_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'array': + array_ = '' + for text__content_ in child_.childNodes: + array_ += text__content_.nodeValue + self.array = array_ + elif 
child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'defval': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_defval(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_briefdescription(obj_) +# end class paramType + + +class declname(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if declname.subclass: + return declname.subclass(*args_, **kwargs_) + else: + return declname(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='declname') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='declname'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='declname'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='declname'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class declname + + +class defname(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if defname.subclass: + return defname.subclass(*args_, **kwargs_) + else: + return defname(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='defname') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, 
outfile, level, namespace_='', name_='defname'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='defname'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='defname'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class defname + + +class array(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if array.subclass: + return array.subclass(*args_, **kwargs_) + else: + return array(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='array') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='array'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='array'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='array'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ 
+= '![CDATA['+child_.nodeValue+']]' +# end class array + + +class linkedTextType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, ref=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if linkedTextType.subclass: + return linkedTextType.subclass(*args_, **kwargs_) + else: + return linkedTextType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_ref(self): return self.ref + def set_ref(self, ref): self.ref = ref + def add_ref(self, value): self.ref.append(value) + def insert_ref(self, index, value): self.ref[index] = value + def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='linkedTextType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.ref is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='linkedTextType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'ref': + childobj_ = docRefTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'ref', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class linkedTextType + + +class graphType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, node=None): + if node is None: + self.node = [] + else: + self.node = node + def factory(*args_, **kwargs_): + if graphType.subclass: + return graphType.subclass(*args_, **kwargs_) + else: + return graphType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_node(self): return self.node + def set_node(self, node): self.node = node + def add_node(self, value): self.node.append(value) + def insert_node(self, index, value): self.node[index] = value + def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, 
)) + self.exportAttributes(outfile, level, namespace_, name_='graphType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='graphType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='graphType'): + for node_ in self.node: + node_.export(outfile, level, namespace_, name_='node') + def hasContent_(self): + if ( + self.node is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='graphType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('node=[\n') + level += 1 + for node in self.node: + showIndent(outfile, level) + outfile.write('model_.node(\n') + node.exportLiteral(outfile, level, name_='node') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'node': + obj_ = nodeType.factory() + obj_.build(child_) + self.node.append(obj_) +# end class graphType + + +class nodeType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, label=None, link=None, childnode=None): + self.id = id + self.label = label + self.link = link + if childnode is None: + self.childnode = [] + else: + self.childnode = childnode + def factory(*args_, **kwargs_): + if nodeType.subclass: + return nodeType.subclass(*args_, **kwargs_) + else: + return nodeType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_label(self): return self.label + def set_label(self, label): self.label = label + def get_link(self): return self.link + def set_link(self, link): self.link = link + def get_childnode(self): return self.childnode + def set_childnode(self, childnode): self.childnode = childnode + def add_childnode(self, value): self.childnode.append(value) + def insert_childnode(self, index, value): self.childnode[index] = value + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='nodeType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='nodeType'): + if self.label is not None: + showIndent(outfile, level) + 
outfile.write('<%slabel>%s\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_)) + if self.link: + self.link.export(outfile, level, namespace_, name_='link') + for childnode_ in self.childnode: + childnode_.export(outfile, level, namespace_, name_='childnode') + def hasContent_(self): + if ( + self.label is not None or + self.link is not None or + self.childnode is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='nodeType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding)) + if self.link: + showIndent(outfile, level) + outfile.write('link=model_.linkType(\n') + self.link.exportLiteral(outfile, level, name_='link') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('childnode=[\n') + level += 1 + for childnode in self.childnode: + showIndent(outfile, level) + outfile.write('model_.childnode(\n') + childnode.exportLiteral(outfile, level, name_='childnode') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'label': + label_ = '' + for text__content_ in child_.childNodes: + label_ += text__content_.nodeValue + self.label = label_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'link': + obj_ = linkType.factory() + obj_.build(child_) + self.set_link(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'childnode': + obj_ = childnodeType.factory() + obj_.build(child_) + self.childnode.append(obj_) +# end class nodeType + + +class label(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if label.subclass: + return label.subclass(*args_, **kwargs_) + else: + return label(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='label') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='label'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='label'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + 
value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='label'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class label + + +class childnodeType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, relation=None, refid=None, edgelabel=None): + self.relation = relation + self.refid = refid + if edgelabel is None: + self.edgelabel = [] + else: + self.edgelabel = edgelabel + def factory(*args_, **kwargs_): + if childnodeType.subclass: + return childnodeType.subclass(*args_, **kwargs_) + else: + return childnodeType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_edgelabel(self): return self.edgelabel + def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel + def add_edgelabel(self, value): self.edgelabel.append(value) + def insert_edgelabel(self, index, value): self.edgelabel[index] = value + def get_relation(self): return self.relation + def set_relation(self, relation): self.relation = relation + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='childnodeType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'): + if self.relation is not None: + outfile.write(' relation=%s' % (quote_attrib(self.relation), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'): + for edgelabel_ in self.edgelabel: + showIndent(outfile, level) + outfile.write('<%sedgelabel>%s\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) + def hasContent_(self): + if ( + self.edgelabel is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='childnodeType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + 
if self.relation is not None: + showIndent(outfile, level) + outfile.write('relation = "%s",\n' % (self.relation,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('edgelabel=[\n') + level += 1 + for edgelabel in self.edgelabel: + showIndent(outfile, level) + outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding)) + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('relation'): + self.relation = attrs.get('relation').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'edgelabel': + edgelabel_ = '' + for text__content_ in child_.childNodes: + edgelabel_ += text__content_.nodeValue + self.edgelabel.append(edgelabel_) +# end class childnodeType + + +class edgelabel(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if edgelabel.subclass: + return edgelabel.subclass(*args_, **kwargs_) + else: + return edgelabel(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='edgelabel') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='edgelabel'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class edgelabel + + +class 
linkType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, external=None, valueOf_=''): + self.refid = refid + self.external = external + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if linkType.subclass: + return linkType.subclass(*args_, **kwargs_) + else: + return linkType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_external(self): return self.external + def set_external(self, external): self.external = external + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='linkType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='linkType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.external is not None: + outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='linkType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='linkType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('external'): + self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class linkType + + +class listingType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, codeline=None): + if codeline is None: + self.codeline = [] + else: + self.codeline = codeline + def factory(*args_, **kwargs_): + if listingType.subclass: + return listingType.subclass(*args_, **kwargs_) + else: + return listingType(*args_, 
**kwargs_) + factory = staticmethod(factory) + def get_codeline(self): return self.codeline + def set_codeline(self, codeline): self.codeline = codeline + def add_codeline(self, value): self.codeline.append(value) + def insert_codeline(self, index, value): self.codeline[index] = value + def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='listingType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='listingType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='listingType'): + for codeline_ in self.codeline: + codeline_.export(outfile, level, namespace_, name_='codeline') + def hasContent_(self): + if ( + self.codeline is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='listingType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('codeline=[\n') + level += 1 + for codeline in self.codeline: + showIndent(outfile, level) + outfile.write('model_.codeline(\n') + codeline.exportLiteral(outfile, level, name_='codeline') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'codeline': + obj_ = codelineType.factory() + obj_.build(child_) + self.codeline.append(obj_) +# end class listingType + + +class codelineType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): + self.external = external + self.lineno = lineno + self.refkind = refkind + self.refid = refid + if highlight is None: + self.highlight = [] + else: + self.highlight = highlight + def factory(*args_, **kwargs_): + if codelineType.subclass: + return codelineType.subclass(*args_, **kwargs_) + else: + return codelineType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_highlight(self): return self.highlight + def set_highlight(self, highlight): self.highlight = highlight + def add_highlight(self, value): self.highlight.append(value) + def insert_highlight(self, index, value): self.highlight[index] = value + def get_external(self): return self.external + def set_external(self, external): self.external = external + def get_lineno(self): return self.lineno + def set_lineno(self, lineno): self.lineno = lineno + def get_refkind(self): return self.refkind + def set_refkind(self, refkind): self.refkind = refkind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='codelineType', 
namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='codelineType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'): + if self.external is not None: + outfile.write(' external=%s' % (quote_attrib(self.external), )) + if self.lineno is not None: + outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')) + if self.refkind is not None: + outfile.write(' refkind=%s' % (quote_attrib(self.refkind), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='codelineType'): + for highlight_ in self.highlight: + highlight_.export(outfile, level, namespace_, name_='highlight') + def hasContent_(self): + if ( + self.highlight is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='codelineType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = "%s",\n' % (self.external,)) + if self.lineno is not None: + showIndent(outfile, level) + outfile.write('lineno = %s,\n' % (self.lineno,)) + if self.refkind is not None: + showIndent(outfile, level) + outfile.write('refkind = "%s",\n' % (self.refkind,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('highlight=[\n') + level += 1 + for highlight in self.highlight: + showIndent(outfile, level) + outfile.write('model_.highlight(\n') + highlight.exportLiteral(outfile, level, name_='highlight') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('external'): + self.external = attrs.get('external').value + if attrs.get('lineno'): + try: + self.lineno = int(attrs.get('lineno').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (lineno): %s' % exp) + if attrs.get('refkind'): + self.refkind = attrs.get('refkind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'highlight': + obj_ = highlightType.factory() + obj_.build(child_) + self.highlight.append(obj_) +# end class codelineType + + +class highlightType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None): + self.classxx = classxx + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + 
self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if highlightType.subclass: + return highlightType.subclass(*args_, **kwargs_) + else: + return highlightType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_sp(self): return self.sp + def set_sp(self, sp): self.sp = sp + def add_sp(self, value): self.sp.append(value) + def insert_sp(self, index, value): self.sp[index] = value + def get_ref(self): return self.ref + def set_ref(self, ref): self.ref = ref + def add_ref(self, value): self.ref.append(value) + def insert_ref(self, index, value): self.ref[index] = value + def get_class(self): return self.classxx + def set_class(self, classxx): self.classxx = classxx + def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='highlightType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'): + if self.classxx is not None: + outfile.write(' class=%s' % (quote_attrib(self.classxx), )) + def exportChildren(self, outfile, level, namespace_='', name_='highlightType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.sp is not None or + self.ref is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='highlightType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.classxx is not None: + showIndent(outfile, level) + outfile.write('classxx = "%s",\n' % (self.classxx,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('class'): + self.classxx = attrs.get('class').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sp': + value_ = [] + for text_ in child_.childNodes: + value_.append(text_.nodeValue) + valuestr_ = ''.join(value_) + obj_ = self.mixedclass_(MixedContainer.CategorySimple, + MixedContainer.TypeString, 'sp', valuestr_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'ref': + childobj_ = docRefTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'ref', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class highlightType + + +class 
sp(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if sp.subclass: + return sp.subclass(*args_, **kwargs_) + else: + return sp(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='sp') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='sp'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='sp'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='sp'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class sp + + +class referenceType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): + self.endline = endline + self.startline = startline + self.refid = refid + self.compoundref = compoundref + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if referenceType.subclass: + return referenceType.subclass(*args_, **kwargs_) + else: + return referenceType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_endline(self): return self.endline + def set_endline(self, endline): self.endline = endline + def get_startline(self): return self.startline + def set_startline(self, startline): self.startline = startline + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_compoundref(self): return self.compoundref + def set_compoundref(self, compoundref): self.compoundref = compoundref + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''): + 
showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='referenceType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'): + if self.endline is not None: + outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline')) + if self.startline is not None: + outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline')) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.compoundref is not None: + outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) + def exportChildren(self, outfile, level, namespace_='', name_='referenceType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='referenceType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.endline is not None: + showIndent(outfile, level) + outfile.write('endline = %s,\n' % (self.endline,)) + if self.startline is not None: + showIndent(outfile, level) + outfile.write('startline = %s,\n' % (self.startline,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.compoundref is not None: + showIndent(outfile, level) + outfile.write('compoundref = %s,\n' % (self.compoundref,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('endline'): + try: + self.endline = int(attrs.get('endline').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (endline): %s' % exp) + if attrs.get('startline'): + try: + self.startline = int(attrs.get('startline').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (startline): %s' % exp) + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('compoundref'): + self.compoundref = attrs.get('compoundref').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class referenceType + + +class locationType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, bodystart=None, line=None, bodyend=None, 
bodyfile=None, file=None, valueOf_=''): + self.bodystart = bodystart + self.line = line + self.bodyend = bodyend + self.bodyfile = bodyfile + self.file = file + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if locationType.subclass: + return locationType.subclass(*args_, **kwargs_) + else: + return locationType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_bodystart(self): return self.bodystart + def set_bodystart(self, bodystart): self.bodystart = bodystart + def get_line(self): return self.line + def set_line(self, line): self.line = line + def get_bodyend(self): return self.bodyend + def set_bodyend(self, bodyend): self.bodyend = bodyend + def get_bodyfile(self): return self.bodyfile + def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile + def get_file(self): return self.file + def set_file(self, file): self.file = file + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='locationType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='locationType'): + if self.bodystart is not None: + outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart')) + if self.line is not None: + outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line')) + if self.bodyend is not None: + outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend')) + if self.bodyfile is not None: + outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) + if self.file is not None: + outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), )) + def exportChildren(self, outfile, level, namespace_='', name_='locationType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='locationType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.bodystart is not None: + showIndent(outfile, level) + outfile.write('bodystart = %s,\n' % (self.bodystart,)) + if self.line is not None: + showIndent(outfile, level) + outfile.write('line = %s,\n' % (self.line,)) + if self.bodyend is not None: + showIndent(outfile, level) + outfile.write('bodyend = %s,\n' % (self.bodyend,)) + if self.bodyfile is not None: + showIndent(outfile, level) + outfile.write('bodyfile = %s,\n' % (self.bodyfile,)) + if self.file is not None: + showIndent(outfile, level) + outfile.write('file = %s,\n' % (self.file,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % 
(self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('bodystart'): + try: + self.bodystart = int(attrs.get('bodystart').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (bodystart): %s' % exp) + if attrs.get('line'): + try: + self.line = int(attrs.get('line').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (line): %s' % exp) + if attrs.get('bodyend'): + try: + self.bodyend = int(attrs.get('bodyend').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (bodyend): %s' % exp) + if attrs.get('bodyfile'): + self.bodyfile = attrs.get('bodyfile').value + if attrs.get('file'): + self.file = attrs.get('file').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class locationType + + +class docSect1Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect1Type.subclass: + return docSect1Type.subclass(*args_, **kwargs_) + else: + return docSect1Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect2(self): return self.sect2 + def set_sect2(self, sect2): self.sect2 = sect2 + def add_sect2(self, value): self.sect2.append(value) + def insert_sect2(self, index, value): self.sect2[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect1Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect2 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSect1Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if 
self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect2': + childobj_ = docSect2Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect2', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS1Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docSect1Type + + +class docSect2Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect2Type.subclass: + return docSect2Type.subclass(*args_, **kwargs_) + else: + return docSect2Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = 
value + def get_sect3(self): return self.sect3 + def set_sect3(self, sect3): self.sect3 = sect3 + def add_sect3(self, value): self.sect3.append(value) + def insert_sect3(self, index, value): self.sect3[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect2Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect3 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSect2Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect3': + childobj_ = docSect3Type.factory() + 
childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect3', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS2Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docSect2Type + + +class docSect3Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect3Type.subclass: + return docSect3Type.subclass(*args_, **kwargs_) + else: + return docSect3Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect4(self): return self.sect4 + def set_sect4(self, sect4): self.sect4 = sect4 + def add_sect4(self, value): self.sect4.append(value) + def insert_sect4(self, index, value): self.sect4[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect3Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect4 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSect3Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + 
for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect4': + childobj_ = docSect4Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect4', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS3Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docSect3Type + + +class docSect4Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect4Type.subclass: + return docSect4Type.subclass(*args_, **kwargs_) + else: + return docSect4Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect4Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % 
(namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSect4Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS4Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docSect4Type + + +class docInternalType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalType.subclass: + return docInternalType.subclass(*args_, **kwargs_) + else: + return docInternalType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = 
para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect1(self): return self.sect1 + def set_sect1(self, sect1): self.sect1 = sect1 + def add_sect1(self, value): self.sect1.append(value) + def insert_sect1(self, index, value): self.sect1[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect1 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect1': + childobj_ = docSect1Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect1', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalType + + +class docInternalS1Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS1Type.subclass: + return docInternalS1Type.subclass(*args_, **kwargs_) + else: + return docInternalS1Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): 
self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect2(self): return self.sect2 + def set_sect2(self, sect2): self.sect2 = sect2 + def add_sect2(self, value): self.sect2.append(value) + def insert_sect2(self, index, value): self.sect2[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect2 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS1Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect2': + childobj_ = docSect2Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect2', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS1Type + + +class docInternalS2Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS2Type.subclass: + return docInternalS2Type.subclass(*args_, **kwargs_) + else: + return docInternalS2Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def 
insert_para(self, index, value): self.para[index] = value + def get_sect3(self): return self.sect3 + def set_sect3(self, sect3): self.sect3 = sect3 + def add_sect3(self, value): self.sect3.append(value) + def insert_sect3(self, index, value): self.sect3[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect3 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS2Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect3': + childobj_ = docSect3Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect3', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS2Type + + +class docInternalS3Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS3Type.subclass: + return docInternalS3Type.subclass(*args_, **kwargs_) + else: + return docInternalS3Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): 
self.para[index] = value + def get_sect3(self): return self.sect3 + def set_sect3(self, sect3): self.sect3 = sect3 + def add_sect3(self, value): self.sect3.append(value) + def insert_sect3(self, index, value): self.sect3[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect3 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS3Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect3': + childobj_ = docSect4Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect3', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS3Type + + +class docInternalS4Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS4Type.subclass: + return docInternalS4Type.subclass(*args_, **kwargs_) + else: + return docInternalS4Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def export(self, outfile, 
level, namespace_='', name_='docInternalS4Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS4Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS4Type + + +class docTitleType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docTitleType.subclass: + return docTitleType.subclass(*args_, **kwargs_) + else: + return docTitleType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTitleType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, 
name_='docTitleType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docTitleType + + +class docParaType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docParaType.subclass: + return docParaType.subclass(*args_, **kwargs_) + else: + return docParaType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParaType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docParaType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParaType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == 
Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docParaType + + +class docMarkupType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docMarkupType.subclass: + return docMarkupType.subclass(*args_, **kwargs_) + else: + return docMarkupType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docMarkupType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docMarkupType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docMarkupType + + +class docURLLink(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): + self.url = url + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docURLLink.subclass: + return docURLLink.subclass(*args_, **kwargs_) + else: + return docURLLink(*args_, **kwargs_) + factory = staticmethod(factory) + def get_url(self): return self.url + def set_url(self, url): self.url = url + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''): + showIndent(outfile, level) 
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docURLLink') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'): + if self.url is not None: + outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docURLLink'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.url is not None: + showIndent(outfile, level) + outfile.write('url = %s,\n' % (self.url,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('url'): + self.url = attrs.get('url').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docURLLink + + +class docAnchorType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docAnchorType.subclass: + return docAnchorType.subclass(*args_, **kwargs_) + else: + return docAnchorType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_id(self): return self.id + def set_id(self, id): self.id = id + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): + if 
self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docAnchorType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docAnchorType + + +class docFormulaType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docFormulaType.subclass: + return docFormulaType.subclass(*args_, **kwargs_) + else: + return docFormulaType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_id(self): return self.id + def set_id(self, id): self.id = id + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docFormulaType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if 
self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docFormulaType + + +class docIndexEntryType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, primaryie=None, secondaryie=None): + self.primaryie = primaryie + self.secondaryie = secondaryie + def factory(*args_, **kwargs_): + if docIndexEntryType.subclass: + return docIndexEntryType.subclass(*args_, **kwargs_) + else: + return docIndexEntryType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_primaryie(self): return self.primaryie + def set_primaryie(self, primaryie): self.primaryie = primaryie + def get_secondaryie(self): return self.secondaryie + def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie + def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): + if self.primaryie is not None: + showIndent(outfile, level) + outfile.write('<%sprimaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) + if self.secondaryie is not None: + showIndent(outfile, level) + outfile.write('<%ssecondaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) + def hasContent_(self): + if ( + self.primaryie is not None or + self.secondaryie is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docIndexEntryType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + 
for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'primaryie': + primaryie_ = '' + for text__content_ in child_.childNodes: + primaryie_ += text__content_.nodeValue + self.primaryie = primaryie_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'secondaryie': + secondaryie_ = '' + for text__content_ in child_.childNodes: + secondaryie_ += text__content_.nodeValue + self.secondaryie = secondaryie_ +# end class docIndexEntryType + + +class docListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, listitem=None): + if listitem is None: + self.listitem = [] + else: + self.listitem = listitem + def factory(*args_, **kwargs_): + if docListType.subclass: + return docListType.subclass(*args_, **kwargs_) + else: + return docListType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_listitem(self): return self.listitem + def set_listitem(self, listitem): self.listitem = listitem + def add_listitem(self, value): self.listitem.append(value) + def insert_listitem(self, index, value): self.listitem[index] = value + def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docListType'): + for listitem_ in self.listitem: + listitem_.export(outfile, level, namespace_, name_='listitem') + def hasContent_(self): + if ( + self.listitem is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('listitem=[\n') + level += 1 + for listitem in self.listitem: + showIndent(outfile, level) + outfile.write('model_.listitem(\n') + listitem.exportLiteral(outfile, level, name_='listitem') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'listitem': + obj_ = docListItemType.factory() + obj_.build(child_) + self.listitem.append(obj_) +# end class docListType + + +class docListItemType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None): + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docListItemType.subclass: + return 
docListItemType.subclass(*args_, **kwargs_) + else: + return docListItemType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docListItemType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docListItemType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docListItemType + + +class docSimpleSectType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, title=None, para=None): + self.kind = kind + self.title = title + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docSimpleSectType.subclass: + return docSimpleSectType.subclass(*args_, **kwargs_) + else: + return docSimpleSectType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + 
outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): + if self.title: + self.title.export(outfile, level, namespace_, name_='title') + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.title is not None or + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSimpleSectType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): + if self.title: + showIndent(outfile, level) + outfile.write('title=model_.docTitleType(\n') + self.title.exportLiteral(outfile, level, name_='title') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + obj_ = docTitleType.factory() + obj_.build(child_) + self.set_title(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docSimpleSectType + + +class docVarListEntryType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, term=None): + self.term = term + def factory(*args_, **kwargs_): + if docVarListEntryType.subclass: + return docVarListEntryType.subclass(*args_, **kwargs_) + else: + return docVarListEntryType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_term(self): return self.term + def set_term(self, term): self.term = term + def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): + if self.term: + self.term.export(outfile, level, namespace_, name_='term', ) + def hasContent_(self): + if ( + self.term is not None + ): + return True + else: + return 
False + def exportLiteral(self, outfile, level, name_='docVarListEntryType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + if self.term: + showIndent(outfile, level) + outfile.write('term=model_.docTitleType(\n') + self.term.exportLiteral(outfile, level, name_='term') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'term': + obj_ = docTitleType.factory() + obj_.build(child_) + self.set_term(obj_) +# end class docVarListEntryType + + +class docVariableListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if docVariableListType.subclass: + return docVariableListType.subclass(*args_, **kwargs_) + else: + return docVariableListType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docVariableListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docVariableListType + + +class docRefTextType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, kindref=None, 
external=None, valueOf_='', mixedclass_=None, content_=None): + self.refid = refid + self.kindref = kindref + self.external = external + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docRefTextType.subclass: + return docRefTextType.subclass(*args_, **kwargs_) + else: + return docRefTextType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_kindref(self): return self.kindref + def set_kindref(self, kindref): self.kindref = kindref + def get_external(self): return self.external + def set_external(self, external): self.external = external + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.kindref is not None: + outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) + if self.external is not None: + outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docRefTextType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.kindref is not None: + showIndent(outfile, level) + outfile.write('kindref = "%s",\n' % (self.kindref,)) + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('kindref'): + self.kindref = attrs.get('kindref').value + if attrs.get('external'): + self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = 
self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docRefTextType + + +class docTableType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, rows=None, cols=None, row=None, caption=None): + self.rows = rows + self.cols = cols + if row is None: + self.row = [] + else: + self.row = row + self.caption = caption + def factory(*args_, **kwargs_): + if docTableType.subclass: + return docTableType.subclass(*args_, **kwargs_) + else: + return docTableType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_row(self): return self.row + def set_row(self, row): self.row = row + def add_row(self, value): self.row.append(value) + def insert_row(self, index, value): self.row[index] = value + def get_caption(self): return self.caption + def set_caption(self, caption): self.caption = caption + def get_rows(self): return self.rows + def set_rows(self, rows): self.rows = rows + def get_cols(self): return self.cols + def set_cols(self, cols): self.cols = cols + def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTableType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): + if self.rows is not None: + outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) + if self.cols is not None: + outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) + def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): + for row_ in self.row: + row_.export(outfile, level, namespace_, name_='row') + if self.caption: + self.caption.export(outfile, level, namespace_, name_='caption') + def hasContent_(self): + if ( + self.row is not None or + self.caption is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docTableType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.rows is not None: + showIndent(outfile, level) + outfile.write('rows = %s,\n' % (self.rows,)) + if self.cols is not None: + showIndent(outfile, level) + outfile.write('cols = %s,\n' % (self.cols,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('row=[\n') + level += 1 + for row in self.row: + showIndent(outfile, level) + outfile.write('model_.row(\n') + row.exportLiteral(outfile, level, name_='row') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.caption: + showIndent(outfile, level) + outfile.write('caption=model_.docCaptionType(\n') + self.caption.exportLiteral(outfile, level, name_='caption') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + 
self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('rows'): + try: + self.rows = int(attrs.get('rows').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (rows): %s' % exp) + if attrs.get('cols'): + try: + self.cols = int(attrs.get('cols').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (cols): %s' % exp) + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'row': + obj_ = docRowType.factory() + obj_.build(child_) + self.row.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'caption': + obj_ = docCaptionType.factory() + obj_.build(child_) + self.set_caption(obj_) +# end class docTableType + + +class docRowType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, entry=None): + if entry is None: + self.entry = [] + else: + self.entry = entry + def factory(*args_, **kwargs_): + if docRowType.subclass: + return docRowType.subclass(*args_, **kwargs_) + else: + return docRowType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_entry(self): return self.entry + def set_entry(self, entry): self.entry = entry + def add_entry(self, value): self.entry.append(value) + def insert_entry(self, index, value): self.entry[index] = value + def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docRowType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): + for entry_ in self.entry: + entry_.export(outfile, level, namespace_, name_='entry') + def hasContent_(self): + if ( + self.entry is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docRowType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('entry=[\n') + level += 1 + for entry in self.entry: + showIndent(outfile, level) + outfile.write('model_.entry(\n') + entry.exportLiteral(outfile, level, name_='entry') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'entry': + obj_ = docEntryType.factory() + obj_.build(child_) + self.entry.append(obj_) +# end class docRowType + + +class docEntryType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, thead=None, para=None): + self.thead = 
thead + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docEntryType.subclass: + return docEntryType.subclass(*args_, **kwargs_) + else: + return docEntryType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_thead(self): return self.thead + def set_thead(self, thead): self.thead = thead + def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docEntryType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): + if self.thead is not None: + outfile.write(' thead=%s' % (quote_attrib(self.thead), )) + def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docEntryType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.thead is not None: + showIndent(outfile, level) + outfile.write('thead = "%s",\n' % (self.thead,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('thead'): + self.thead = attrs.get('thead').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docEntryType + + +class docCaptionType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docCaptionType.subclass: + return docCaptionType.subclass(*args_, **kwargs_) + else: + return docCaptionType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, 
name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docCaptionType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docCaptionType + + +class docHeadingType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): + self.level = level + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docHeadingType.subclass: + return docHeadingType.subclass(*args_, **kwargs_) + else: + return docHeadingType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_level(self): return self.level + def set_level(self, level): self.level = level + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): + if self.level is not None: + outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) + def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def 
exportLiteral(self, outfile, level, name_='docHeadingType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.level is not None: + showIndent(outfile, level) + outfile.write('level = %s,\n' % (self.level,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('level'): + try: + self.level = int(attrs.get('level').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (level): %s' % exp) + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docHeadingType + + +class docImageType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): + self.width = width + self.type_ = type_ + self.name = name + self.height = height + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docImageType.subclass: + return docImageType.subclass(*args_, **kwargs_) + else: + return docImageType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_width(self): return self.width + def set_width(self, width): self.width = width + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_height(self): return self.height + def set_height(self, height): self.height = height + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docImageType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'): + if self.width is not None: + outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) + if self.type_ is not None: + outfile.write(' type=%s' % (quote_attrib(self.type_), )) + if self.name is not None: + outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + if self.height is not None: + outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) + def exportChildren(self, outfile, level, 
namespace_='', name_='docImageType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docImageType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.width is not None: + showIndent(outfile, level) + outfile.write('width = %s,\n' % (self.width,)) + if self.type_ is not None: + showIndent(outfile, level) + outfile.write('type_ = "%s",\n' % (self.type_,)) + if self.name is not None: + showIndent(outfile, level) + outfile.write('name = %s,\n' % (self.name,)) + if self.height is not None: + showIndent(outfile, level) + outfile.write('height = %s,\n' % (self.height,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('width'): + self.width = attrs.get('width').value + if attrs.get('type'): + self.type_ = attrs.get('type').value + if attrs.get('name'): + self.name = attrs.get('name').value + if attrs.get('height'): + self.height = attrs.get('height').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docImageType + + +class docDotFileType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): + self.name = name + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docDotFileType.subclass: + return docDotFileType.subclass(*args_, **kwargs_) + else: + return docDotFileType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docDotFileType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'): + if self.name is not None: + outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + def exportChildren(self, outfile, level, 
namespace_='', name_='docDotFileType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docDotFileType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.name is not None: + showIndent(outfile, level) + outfile.write('name = %s,\n' % (self.name,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('name'): + self.name = attrs.get('name').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docDotFileType + + +class docTocItemType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docTocItemType.subclass: + return docTocItemType.subclass(*args_, **kwargs_) + else: + return docTocItemType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_id(self): return self.id + def set_id(self, id): self.id = id + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTocItemType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docTocItemType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def 
exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docTocItemType + + +class docTocListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, tocitem=None): + if tocitem is None: + self.tocitem = [] + else: + self.tocitem = tocitem + def factory(*args_, **kwargs_): + if docTocListType.subclass: + return docTocListType.subclass(*args_, **kwargs_) + else: + return docTocListType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_tocitem(self): return self.tocitem + def set_tocitem(self, tocitem): self.tocitem = tocitem + def add_tocitem(self, value): self.tocitem.append(value) + def insert_tocitem(self, index, value): self.tocitem[index] = value + def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTocListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'): + for tocitem_ in self.tocitem: + tocitem_.export(outfile, level, namespace_, name_='tocitem') + def hasContent_(self): + if ( + self.tocitem is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docTocListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('tocitem=[\n') + level += 1 + for tocitem in self.tocitem: + showIndent(outfile, level) + outfile.write('model_.tocitem(\n') + tocitem.exportLiteral(outfile, level, name_='tocitem') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 
'tocitem': + obj_ = docTocItemType.factory() + obj_.build(child_) + self.tocitem.append(obj_) +# end class docTocListType + + +class docLanguageType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, langid=None, para=None): + self.langid = langid + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docLanguageType.subclass: + return docLanguageType.subclass(*args_, **kwargs_) + else: + return docLanguageType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_langid(self): return self.langid + def set_langid(self, langid): self.langid = langid + def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docLanguageType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'): + if self.langid is not None: + outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docLanguageType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.langid is not None: + showIndent(outfile, level) + outfile.write('langid = %s,\n' % (self.langid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('langid'): + self.langid = attrs.get('langid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docLanguageType + + +class docParamListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, parameteritem=None): + self.kind = kind + if parameteritem is None: + self.parameteritem = [] + else: + self.parameteritem = parameteritem + def factory(*args_, **kwargs_): + if docParamListType.subclass: + return docParamListType.subclass(*args_, **kwargs_) + else: + return docParamListType(*args_, **kwargs_) 
+ factory = staticmethod(factory) + def get_parameteritem(self): return self.parameteritem + def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem + def add_parameteritem(self, value): self.parameteritem.append(value) + def insert_parameteritem(self, index, value): self.parameteritem[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'): + for parameteritem_ in self.parameteritem: + parameteritem_.export(outfile, level, namespace_, name_='parameteritem') + def hasContent_(self): + if ( + self.parameteritem is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('parameteritem=[\n') + level += 1 + for parameteritem in self.parameteritem: + showIndent(outfile, level) + outfile.write('model_.parameteritem(\n') + parameteritem.exportLiteral(outfile, level, name_='parameteritem') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameteritem': + obj_ = docParamListItem.factory() + obj_.build(child_) + self.parameteritem.append(obj_) +# end class docParamListType + + +class docParamListItem(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, parameternamelist=None, parameterdescription=None): + if parameternamelist is None: + self.parameternamelist = [] + else: + self.parameternamelist = parameternamelist + self.parameterdescription = parameterdescription + def factory(*args_, **kwargs_): + if docParamListItem.subclass: + return docParamListItem.subclass(*args_, **kwargs_) + else: + return docParamListItem(*args_, **kwargs_) + factory = staticmethod(factory) + def get_parameternamelist(self): return self.parameternamelist + def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist + def add_parameternamelist(self, value): self.parameternamelist.append(value) + def insert_parameternamelist(self, 
index, value): self.parameternamelist[index] = value + def get_parameterdescription(self): return self.parameterdescription + def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription + def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamListItem') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'): + for parameternamelist_ in self.parameternamelist: + parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist') + if self.parameterdescription: + self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', ) + def hasContent_(self): + if ( + self.parameternamelist is not None or + self.parameterdescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamListItem'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('parameternamelist=[\n') + level += 1 + for parameternamelist in self.parameternamelist: + showIndent(outfile, level) + outfile.write('model_.parameternamelist(\n') + parameternamelist.exportLiteral(outfile, level, name_='parameternamelist') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.parameterdescription: + showIndent(outfile, level) + outfile.write('parameterdescription=model_.descriptionType(\n') + self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameternamelist': + obj_ = docParamNameList.factory() + obj_.build(child_) + self.parameternamelist.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameterdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_parameterdescription(obj_) +# end class docParamListItem + + +class docParamNameList(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, parametername=None): + if parametername is None: + self.parametername = [] + else: + self.parametername = parametername + def factory(*args_, **kwargs_): + if docParamNameList.subclass: + return docParamNameList.subclass(*args_, **kwargs_) + else: + return docParamNameList(*args_, **kwargs_) + factory = staticmethod(factory) + def get_parametername(self): return self.parametername + def set_parametername(self, parametername): 
self.parametername = parametername + def add_parametername(self, value): self.parametername.append(value) + def insert_parametername(self, index, value): self.parametername[index] = value + def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamNameList') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'): + for parametername_ in self.parametername: + parametername_.export(outfile, level, namespace_, name_='parametername') + def hasContent_(self): + if ( + self.parametername is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamNameList'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('parametername=[\n') + level += 1 + for parametername in self.parametername: + showIndent(outfile, level) + outfile.write('model_.parametername(\n') + parametername.exportLiteral(outfile, level, name_='parametername') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parametername': + obj_ = docParamName.factory() + obj_.build(child_) + self.parametername.append(obj_) +# end class docParamNameList + + +class docParamName(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): + self.direction = direction + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docParamName.subclass: + return docParamName.subclass(*args_, **kwargs_) + else: + return docParamName(*args_, **kwargs_) + factory = staticmethod(factory) + def get_ref(self): return self.ref + def set_ref(self, ref): self.ref = ref + def get_direction(self): return self.direction + def set_direction(self, direction): self.direction = direction + def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamName') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'): + if self.direction is not None: + 
outfile.write(' direction=%s' % (quote_attrib(self.direction), )) + def exportChildren(self, outfile, level, namespace_='', name_='docParamName'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.ref is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamName'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.direction is not None: + showIndent(outfile, level) + outfile.write('direction = "%s",\n' % (self.direction,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('direction'): + self.direction = attrs.get('direction').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'ref': + childobj_ = docRefTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'ref', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docParamName + + +class docXRefSectType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, xreftitle=None, xrefdescription=None): + self.id = id + if xreftitle is None: + self.xreftitle = [] + else: + self.xreftitle = xreftitle + self.xrefdescription = xrefdescription + def factory(*args_, **kwargs_): + if docXRefSectType.subclass: + return docXRefSectType.subclass(*args_, **kwargs_) + else: + return docXRefSectType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_xreftitle(self): return self.xreftitle + def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle + def add_xreftitle(self, value): self.xreftitle.append(value) + def insert_xreftitle(self, index, value): self.xreftitle[index] = value + def get_xrefdescription(self): return self.xrefdescription + def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', 
name_='docXRefSectType'): + for xreftitle_ in self.xreftitle: + showIndent(outfile, level) + outfile.write('<%sxreftitle>%s\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) + if self.xrefdescription: + self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', ) + def hasContent_(self): + if ( + self.xreftitle is not None or + self.xrefdescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docXRefSectType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('xreftitle=[\n') + level += 1 + for xreftitle in self.xreftitle: + showIndent(outfile, level) + outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding)) + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.xrefdescription: + showIndent(outfile, level) + outfile.write('xrefdescription=model_.descriptionType(\n') + self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'xreftitle': + xreftitle_ = '' + for text__content_ in child_.childNodes: + xreftitle_ += text__content_.nodeValue + self.xreftitle.append(xreftitle_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'xrefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_xrefdescription(obj_) +# end class docXRefSectType + + +class docCopyType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, link=None, para=None, sect1=None, internal=None): + self.link = link + if para is None: + self.para = [] + else: + self.para = para + if sect1 is None: + self.sect1 = [] + else: + self.sect1 = sect1 + self.internal = internal + def factory(*args_, **kwargs_): + if docCopyType.subclass: + return docCopyType.subclass(*args_, **kwargs_) + else: + return docCopyType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect1(self): return self.sect1 + def set_sect1(self, sect1): self.sect1 = sect1 + def add_sect1(self, value): self.sect1.append(value) + def insert_sect1(self, index, value): self.sect1[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_link(self): return self.link + def set_link(self, link): self.link = link + def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, 
level, namespace_, name_='docCopyType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'): + if self.link is not None: + outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + for sect1_ in self.sect1: + sect1_.export(outfile, level, namespace_, name_='sect1') + if self.internal: + self.internal.export(outfile, level, namespace_, name_='internal') + def hasContent_(self): + if ( + self.para is not None or + self.sect1 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docCopyType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.link is not None: + showIndent(outfile, level) + outfile.write('link = %s,\n' % (self.link,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('sect1=[\n') + level += 1 + for sect1 in self.sect1: + showIndent(outfile, level) + outfile.write('model_.sect1(\n') + sect1.exportLiteral(outfile, level, name_='sect1') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.internal: + showIndent(outfile, level) + outfile.write('internal=model_.docInternalType(\n') + self.internal.exportLiteral(outfile, level, name_='internal') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('link'): + self.link = attrs.get('link').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect1': + obj_ = docSect1Type.factory() + obj_.build(child_) + self.sect1.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + obj_ = docInternalType.factory() + obj_.build(child_) + self.set_internal(obj_) +# end class docCopyType + + +class docCharType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, char=None, valueOf_=''): + self.char = char + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if docCharType.subclass: + return docCharType.subclass(*args_, **kwargs_) + else: + return docCharType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_char(self): return self.char + def set_char(self, 
char): self.char = char + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docCharType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'): + if self.char is not None: + outfile.write(' char=%s' % (quote_attrib(self.char), )) + def exportChildren(self, outfile, level, namespace_='', name_='docCharType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docCharType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.char is not None: + showIndent(outfile, level) + outfile.write('char = "%s",\n' % (self.char,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('char'): + self.char = attrs.get('char').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docCharType + + +class docEmptyType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if docEmptyType.subclass: + return docEmptyType.subclass(*args_, **kwargs_) + else: + return docEmptyType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docEmptyType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( 
+ self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docEmptyType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docEmptyType + + +USAGE_TEXT = """ +Usage: python .py [ -s ] +Options: + -s Use the SAX parser, not the minidom parser. +""" + +def usage(): + print USAGE_TEXT + sys.exit(1) + + +def parse(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygen", + namespacedef_='') + return rootObj + + +def parseString(inString): + doc = minidom.parseString(inString) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygen", + namespacedef_='') + return rootObj + + +def parseLiteral(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('from compound import *\n\n') + sys.stdout.write('rootObj = doxygen(\n') + rootObj.exportLiteral(sys.stdout, 0, name_="doxygen") + sys.stdout.write(')\n') + return rootObj + + +def main(): + args = sys.argv[1:] + if len(args) == 1: + parse(args[0]) + else: + usage() + + +if __name__ == '__main__': + main() + #import pdb + #pdb.run('main()') + diff --git a/docs/doxygen/doxyxml/generated/index.py b/docs/doxygen/doxyxml/generated/index.py new file mode 100644 index 0000000..7a70e14 --- /dev/null +++ b/docs/doxygen/doxyxml/generated/index.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +""" +Generated Mon Feb 9 19:08:05 2009 by generateDS.py. 
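+
+This module subclasses the generated index classes (DoxygenType, CompoundType,
+MemberType) to add the find_compounds_and_members() and find_members() helpers
+used when searching the parsed Doxygen index.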
+""" + +from xml.dom import minidom + +import os +import sys +import compound + +import indexsuper as supermod + +class DoxygenTypeSub(supermod.DoxygenType): + def __init__(self, version=None, compound=None): + supermod.DoxygenType.__init__(self, version, compound) + + def find_compounds_and_members(self, details): + """ + Returns a list of all compounds and their members which match details + """ + + results = [] + for compound in self.compound: + members = compound.find_members(details) + if members: + results.append([compound, members]) + else: + if details.match(compound): + results.append([compound, []]) + + return results + +supermod.DoxygenType.subclass = DoxygenTypeSub +# end class DoxygenTypeSub + + +class CompoundTypeSub(supermod.CompoundType): + def __init__(self, kind=None, refid=None, name='', member=None): + supermod.CompoundType.__init__(self, kind, refid, name, member) + + def find_members(self, details): + """ + Returns a list of all members which match details + """ + + results = [] + + for member in self.member: + if details.match(member): + results.append(member) + + return results + +supermod.CompoundType.subclass = CompoundTypeSub +# end class CompoundTypeSub + + +class MemberTypeSub(supermod.MemberType): + + def __init__(self, kind=None, refid=None, name=''): + supermod.MemberType.__init__(self, kind, refid, name) + +supermod.MemberType.subclass = MemberTypeSub +# end class MemberTypeSub + + +def parse(inFilename): + + doc = minidom.parse(inFilename) + rootNode = doc.documentElement + rootObj = supermod.DoxygenType.factory() + rootObj.build(rootNode) + + return rootObj + diff --git a/docs/doxygen/doxyxml/generated/indexsuper.py b/docs/doxygen/doxyxml/generated/indexsuper.py new file mode 100644 index 0000000..a991530 --- /dev/null +++ b/docs/doxygen/doxyxml/generated/indexsuper.py @@ -0,0 +1,523 @@ +#!/usr/bin/env python + +# +# Generated Thu Jun 11 18:43:54 2009 by generateDS.py. +# + +import sys +import getopt +from string import lower as str_lower +from xml.dom import minidom +from xml.dom import Node + +# +# User methods +# +# Calls to the methods in these classes are generated by generateDS.py. +# You can replace these methods by re-implementing the following class +# in a module named generatedssuper.py. + +try: + from generatedssuper import GeneratedsSuper +except ImportError, exp: + + class GeneratedsSuper: + def format_string(self, input_data, input_name=''): + return input_data + def format_integer(self, input_data, input_name=''): + return '%d' % input_data + def format_float(self, input_data, input_name=''): + return '%f' % input_data + def format_double(self, input_data, input_name=''): + return '%e' % input_data + def format_boolean(self, input_data, input_name=''): + return '%s' % input_data + + +# +# If you have installed IPython you can uncomment and use the following. +# IPython is available from http://ipython.scipy.org/. +# + +## from IPython.Shell import IPShellEmbed +## args = '' +## ipshell = IPShellEmbed(args, +## banner = 'Dropping into IPython', +## exit_msg = 'Leaving Interpreter, back to program.') + +# Then use the following line where and when you want to drop into the +# IPython shell: +# ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit') + +# +# Globals +# + +ExternalEncoding = 'ascii' + +# +# Support/utility functions. 
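+# showIndent() writes two spaces per nesting level; quote_xml() and
+# quote_attrib() escape the XML-significant characters '&', '<' and '>'
+# (quote_attrib() additionally chooses single or double quoting for attribute
+# values); quote_python() wraps text in a suitable Python string literal.
+# These helpers back the export()/exportLiteral() methods defined above.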
+# + +def showIndent(outfile, level): + for idx in range(level): + outfile.write(' ') + +def quote_xml(inStr): + s1 = (isinstance(inStr, basestring) and inStr or + '%s' % inStr) + s1 = s1.replace('&', '&') + s1 = s1.replace('<', '<') + s1 = s1.replace('>', '>') + return s1 + +def quote_attrib(inStr): + s1 = (isinstance(inStr, basestring) and inStr or + '%s' % inStr) + s1 = s1.replace('&', '&') + s1 = s1.replace('<', '<') + s1 = s1.replace('>', '>') + if '"' in s1: + if "'" in s1: + s1 = '"%s"' % s1.replace('"', """) + else: + s1 = "'%s'" % s1 + else: + s1 = '"%s"' % s1 + return s1 + +def quote_python(inStr): + s1 = inStr + if s1.find("'") == -1: + if s1.find('\n') == -1: + return "'%s'" % s1 + else: + return "'''%s'''" % s1 + else: + if s1.find('"') != -1: + s1 = s1.replace('"', '\\"') + if s1.find('\n') == -1: + return '"%s"' % s1 + else: + return '"""%s"""' % s1 + + +class MixedContainer: + # Constants for category: + CategoryNone = 0 + CategoryText = 1 + CategorySimple = 2 + CategoryComplex = 3 + # Constants for content_type: + TypeNone = 0 + TypeText = 1 + TypeString = 2 + TypeInteger = 3 + TypeFloat = 4 + TypeDecimal = 5 + TypeDouble = 6 + TypeBoolean = 7 + def __init__(self, category, content_type, name, value): + self.category = category + self.content_type = content_type + self.name = name + self.value = value + def getCategory(self): + return self.category + def getContenttype(self, content_type): + return self.content_type + def getValue(self): + return self.value + def getName(self): + return self.name + def export(self, outfile, level, name, namespace): + if self.category == MixedContainer.CategoryText: + outfile.write(self.value) + elif self.category == MixedContainer.CategorySimple: + self.exportSimple(outfile, level, name) + else: # category == MixedContainer.CategoryComplex + self.value.export(outfile, level, namespace,name) + def exportSimple(self, outfile, level, name): + if self.content_type == MixedContainer.TypeString: + outfile.write('<%s>%s' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeInteger or \ + self.content_type == MixedContainer.TypeBoolean: + outfile.write('<%s>%d' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeFloat or \ + self.content_type == MixedContainer.TypeDecimal: + outfile.write('<%s>%f' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % (self.name, self.value, self.name)) + def exportLiteral(self, outfile, level, name): + if self.category == MixedContainer.CategoryText: + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ + (self.category, self.content_type, self.name, self.value)) + elif self.category == MixedContainer.CategorySimple: + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ + (self.category, self.content_type, self.name, self.value)) + else: # category == MixedContainer.CategoryComplex + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s",\n' % \ + (self.category, self.content_type, self.name,)) + self.value.exportLiteral(outfile, level + 1) + showIndent(outfile, level) + outfile.write(')\n') + + +class _MemberSpec(object): + def __init__(self, name='', data_type='', container=0): + self.name = name + self.data_type = data_type + self.container = container + def set_name(self, name): self.name = name + def get_name(self): return self.name + def set_data_type(self, data_type): self.data_type = 
data_type + def get_data_type(self): return self.data_type + def set_container(self, container): self.container = container + def get_container(self): return self.container + + +# +# Data representation classes. +# + +class DoxygenType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, version=None, compound=None): + self.version = version + if compound is None: + self.compound = [] + else: + self.compound = compound + def factory(*args_, **kwargs_): + if DoxygenType.subclass: + return DoxygenType.subclass(*args_, **kwargs_) + else: + return DoxygenType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_compound(self): return self.compound + def set_compound(self, compound): self.compound = compound + def add_compound(self, value): self.compound.append(value) + def insert_compound(self, index, value): self.compound[index] = value + def get_version(self): return self.version + def set_version(self, version): self.version = version + def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): + outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) + def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): + for compound_ in self.compound: + compound_.export(outfile, level, namespace_, name_='compound') + def hasContent_(self): + if ( + self.compound is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='DoxygenType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.version is not None: + showIndent(outfile, level) + outfile.write('version = %s,\n' % (self.version,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('compound=[\n') + level += 1 + for compound in self.compound: + showIndent(outfile, level) + outfile.write('model_.compound(\n') + compound.exportLiteral(outfile, level, name_='compound') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('version'): + self.version = attrs.get('version').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'compound': + obj_ = CompoundType.factory() + obj_.build(child_) + self.compound.append(obj_) +# end class DoxygenType + + +class CompoundType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, refid=None, name=None, member=None): + self.kind = kind + self.refid = refid + self.name = name + if member is None: + self.member = [] + else: + self.member = 
member + def factory(*args_, **kwargs_): + if CompoundType.subclass: + return CompoundType.subclass(*args_, **kwargs_) + else: + return CompoundType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_member(self): return self.member + def set_member(self, member): self.member = member + def add_member(self, value): self.member.append(value) + def insert_member(self, index, value): self.member[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='CompoundType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'): + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'): + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + for member_ in self.member: + member_.export(outfile, level, namespace_, name_='member') + def hasContent_(self): + if ( + self.name is not None or + self.member is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='CompoundType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('member=[\n') + level += 1 + for member in self.member: + showIndent(outfile, level) + outfile.write('model_.member(\n') + member.exportLiteral(outfile, level, name_='member') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ + elif child_.nodeType == Node.ELEMENT_NODE 
and \ + nodeName_ == 'member': + obj_ = MemberType.factory() + obj_.build(child_) + self.member.append(obj_) +# end class CompoundType + + +class MemberType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, refid=None, name=None): + self.kind = kind + self.refid = refid + self.name = name + def factory(*args_, **kwargs_): + if MemberType.subclass: + return MemberType.subclass(*args_, **kwargs_) + else: + return MemberType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='MemberType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'): + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='MemberType'): + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + def hasContent_(self): + if ( + self.name is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='MemberType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ +# end class MemberType + + +USAGE_TEXT = """ +Usage: python .py [ -s ] +Options: + -s Use the SAX parser, not the minidom parser. +""" + +def usage(): + print USAGE_TEXT + sys.exit(1) + + +def parse(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+ doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygenindex", + namespacedef_='') + return rootObj + + +def parseString(inString): + doc = minidom.parseString(inString) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygenindex", + namespacedef_='') + return rootObj + + +def parseLiteral(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('from index import *\n\n') + sys.stdout.write('rootObj = doxygenindex(\n') + rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex") + sys.stdout.write(')\n') + return rootObj + + +def main(): + args = sys.argv[1:] + if len(args) == 1: + parse(args[0]) + else: + usage() + + + + +if __name__ == '__main__': + main() + #import pdb + #pdb.run('main()') + diff --git a/docs/doxygen/doxyxml/text.py b/docs/doxygen/doxyxml/text.py new file mode 100644 index 0000000..4a79d0a --- /dev/null +++ b/docs/doxygen/doxyxml/text.py @@ -0,0 +1,53 @@ +''' + Copyright (C) 2016 Bastille Networks + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' + +""" +Utilities for extracting text from generated classes. +""" + +def is_string(txt): + if isinstance(txt, str): + return True + try: + if isinstance(txt, unicode): + return True + except NameError: + pass + return False + +def description(obj): + if obj is None: + return None + return description_bit(obj).strip() + +def description_bit(obj): + if hasattr(obj, 'content'): + contents = [description_bit(item) for item in obj.content] + result = ''.join(contents) + elif hasattr(obj, 'content_'): + contents = [description_bit(item) for item in obj.content_] + result = ''.join(contents) + elif hasattr(obj, 'value'): + result = description_bit(obj.value) + elif is_string(obj): + return obj + else: + raise StandardError('Expecting a string or something with content, content_ or value attribute') + # If this bit is a paragraph then add one some line breaks. + if hasattr(obj, 'name') and obj.name == 'para': + result += "\n\n" + return result diff --git a/docs/doxygen/other/group_defs.dox b/docs/doxygen/other/group_defs.dox new file mode 100644 index 0000000..c404d61 --- /dev/null +++ b/docs/doxygen/other/group_defs.dox @@ -0,0 +1,7 @@ +/*! + * \defgroup block GNU Radio NORDIC C++ Signal Processing Blocks + * \brief All C++ blocks that can be used from the NORDIC GNU Radio + * module are listed here or in the subcategories below. + * + */ + diff --git a/docs/doxygen/other/main_page.dox b/docs/doxygen/other/main_page.dox new file mode 100644 index 0000000..34a94f1 --- /dev/null +++ b/docs/doxygen/other/main_page.dox @@ -0,0 +1,10 @@ +/*! 
\mainpage + +Welcome to the GNU Radio NORDIC Block + +This is the intro page for the Doxygen manual generated for the NORDIC +block (docs/doxygen/other/main_page.dox). Edit it to add more detailed +documentation about the new GNU Radio modules contained in this +project. + +*/ diff --git a/docs/doxygen/swig_doc.py b/docs/doxygen/swig_doc.py new file mode 100644 index 0000000..0d4af35 --- /dev/null +++ b/docs/doxygen/swig_doc.py @@ -0,0 +1,252 @@ +''' + Copyright (C) 2016 Bastille Networks + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +''' + +""" +Creates the swig_doc.i SWIG interface file. +Execute using: python swig_doc.py xml_path outputfilename + +The file instructs SWIG to transfer the doxygen comments into the +python docstrings. + +""" + +import sys + +try: + from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base +except ImportError: + from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base + + +def py_name(name): + bits = name.split('_') + return '_'.join(bits[1:]) + +def make_name(name): + bits = name.split('_') + return bits[0] + '_make_' + '_'.join(bits[1:]) + + +class Block(object): + """ + Checks if doxyxml produced objects correspond to a gnuradio block. + """ + + @classmethod + def includes(cls, item): + if not isinstance(item, DoxyClass): + return False + # Check for a parsing error. + if item.error(): + return False + return item.has_member(make_name(item.name()), DoxyFriend) + + +def utoascii(text): + """ + Convert unicode text into ascii and escape quotes. + """ + if text is None: + return '' + out = text.encode('ascii', 'replace') + out = out.replace('"', '\\"') + return out + + +def combine_descriptions(obj): + """ + Combines the brief and detailed descriptions of an object together. + """ + description = [] + bd = obj.brief_description.strip() + dd = obj.detailed_description.strip() + if bd: + description.append(bd) + if dd: + description.append(dd) + return utoascii('\n\n'.join(description)).strip() + + +entry_templ = '%feature("docstring") {name} "{docstring}"' +def make_entry(obj, name=None, templ="{description}", description=None): + """ + Create a docstring entry for a swig interface file. + + obj - a doxyxml object from which documentation will be extracted. + name - the name of the C object (defaults to obj.name()) + templ - an optional template for the docstring containing only one + variable named 'description'. + description - if this optional variable is set then it's value is + used as the description instead of extracting it from obj. 
+ """ + if name is None: + name=obj.name() + if "operator " in name: + return '' + if description is None: + description = combine_descriptions(obj) + docstring = templ.format(description=description) + if not docstring: + return '' + return entry_templ.format( + name=name, + docstring=docstring, + ) + + +def make_func_entry(func, name=None, description=None, params=None): + """ + Create a function docstring entry for a swig interface file. + + func - a doxyxml object from which documentation will be extracted. + name - the name of the C object (defaults to func.name()) + description - if this optional variable is set then it's value is + used as the description instead of extracting it from func. + params - a parameter list that overrides using func.params. + """ + if params is None: + params = func.params + params = [prm.declname for prm in params] + if params: + sig = "Params: (%s)" % ", ".join(params) + else: + sig = "Params: (NONE)" + templ = "{description}\n\n" + sig + return make_entry(func, name=name, templ=utoascii(templ), + description=description) + + +def make_class_entry(klass, description=None): + """ + Create a class docstring for a swig interface file. + """ + output = [] + output.append(make_entry(klass, description=description)) + for func in klass.in_category(DoxyFunction): + name = klass.name() + '::' + func.name() + output.append(make_func_entry(func, name=name)) + return "\n\n".join(output) + + +def make_block_entry(di, block): + """ + Create class and function docstrings of a gnuradio block for a + swig interface file. + """ + descriptions = [] + # Get the documentation associated with the class. + class_desc = combine_descriptions(block) + if class_desc: + descriptions.append(class_desc) + # Get the documentation associated with the make function + make_func = di.get_member(make_name(block.name()), DoxyFunction) + make_func_desc = combine_descriptions(make_func) + if make_func_desc: + descriptions.append(make_func_desc) + # Get the documentation associated with the file + try: + block_file = di.get_member(block.name() + ".h", DoxyFile) + file_desc = combine_descriptions(block_file) + if file_desc: + descriptions.append(file_desc) + except base.Base.NoSuchMember: + # Don't worry if we can't find a matching file. + pass + # And join them all together to make a super duper description. + super_description = "\n\n".join(descriptions) + # Associate the combined description with the class and + # the make function. + output = [] + output.append(make_class_entry(block, description=super_description)) + creator = block.get_member(block.name(), DoxyFunction) + output.append(make_func_entry(make_func, description=super_description, + params=creator.params)) + return "\n\n".join(output) + + +def make_swig_interface_file(di, swigdocfilename, custom_output=None): + + output = [""" +/* + * This file was automatically generated using swig_doc.py. + * + * Any changes to it will be lost next time it is regenerated. + */ +"""] + + if custom_output is not None: + output.append(custom_output) + + # Create docstrings for the blocks. + blocks = di.in_category(Block) + make_funcs = set([]) + for block in blocks: + try: + make_func = di.get_member(make_name(block.name()), DoxyFunction) + make_funcs.add(make_func.name()) + output.append(make_block_entry(di, block)) + except block.ParsingError: + print('Parsing error for block %s' % block.name()) + + # Create docstrings for functions + # Don't include the make functions since they have already been dealt with. 
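+    # (make_block_entry() above already merged each make function's description
+    # into its block's class docstring and recorded the name in make_funcs, so
+    # filtering on make_funcs here avoids emitting duplicate docstring entries.)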
+ funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs] + for f in funcs: + try: + output.append(make_func_entry(f)) + except f.ParsingError: + print('Parsing error for function %s' % f.name()) + + # Create docstrings for classes + block_names = [block.name() for block in blocks] + klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names] + for k in klasses: + try: + output.append(make_class_entry(k)) + except k.ParsingError: + print('Parsing error for class %s' % k.name()) + + # Docstrings are not created for anything that is not a function or a class. + # If this excludes anything important please add it here. + + output = "\n\n".join(output) + + swig_doc = file(swigdocfilename, 'w') + swig_doc.write(output) + swig_doc.close() + +if __name__ == "__main__": + # Parse command line options and set up doxyxml. + err_msg = "Execute using: python swig_doc.py xml_path outputfilename" + if len(sys.argv) != 3: + raise StandardError(err_msg) + xml_path = sys.argv[1] + swigdocfilename = sys.argv[2] + di = DoxyIndex(xml_path) + + # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined! + # This is presumably a bug in SWIG. + #msg_q = di.get_member(u'gr_msg_queue', DoxyClass) + #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction) + #delete_head = msg_q.get_member(u'delete_head', DoxyFunction) + output = [] + #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail')) + #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head')) + custom_output = "\n\n".join(output) + + # Generate the docstrings interface file. + make_swig_interface_file(di, swigdocfilename, custom_output=custom_output) diff --git a/examples/Frame_Sync_Example.grc b/examples/Frame_Sync_Example.grc new file mode 100644 index 0000000..5d503d2 --- /dev/null +++ b/examples/Frame_Sync_Example.grc @@ -0,0 +1,1311 @@ + + + + Sun Sep 4 20:59:56 2016 + + options + + author + Aaron Scher + + + window_size + 2000,2000 + + + category + [GRC Hier Blocks] + + + comment + + + + description + This example shows the basics of frame syncing. + + + _enabled + True + + + _coordinate + (8, 8) + + + _rotation + 0 + + + generate_options + qt_gui + + + hier_block_src_path + .: + + + id + top_block + + + max_nouts + 0 + + + qt_qss_theme + + + + realtime_scheduling + + + + run_command + {python} -u {filename} + + + run_options + prompt + + + run + True + + + sizing_mode + fixed + + + thread_safe_setters + + + + title + Frame Sync Example + + + placement + (0,0) + + + + variable + + comment + + + + _enabled + True + + + _coordinate + (8, 92) + + + _rotation + 0 + + + id + samp_rate + + + value + 32000 + + + + blocks_message_debug + + alias + + + + comment + Display the message that was sent. 
+ + + affinity + + + + _enabled + 1 + + + _coordinate + (896, 208) + + + _rotation + 0 + + + id + blocks_message_debug_0 + + + + blocks_message_strobe + + alias + + + + comment + Generate hello world message + + + affinity + + + + _enabled + 1 + + + _coordinate + (192, 20) + + + _rotation + 0 + + + id + blocks_message_strobe_0_0 + + + maxoutbuf + 0 + + + msg + pmt.cons(pmt.make_dict(), pmt.pmt_to_python.numpy_to_uvector(numpy.array([ord(c) for c in "Hello world!"], numpy.uint8))) + + + minoutbuf + 0 + + + period + 500 + + + + blocks_pdu_to_tagged_stream + + alias + + + + comment + + + + affinity + + + + _enabled + True + + + _coordinate + (216, 124) + + + _rotation + 0 + + + id + blocks_pdu_to_tagged_stream_1 + + + type + byte + + + tag + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + + blocks_repack_bits_bb + + k + 8 + + + l + hdr_const.bits_per_symbol() + + + alias + + + + comment + Access code block up ahead needs 1 byte +per bit + + + affinity + + + + _enabled + 1 + + + endianness + gr.GR_MSB_FIRST + + + _coordinate + (872, 84) + + + _rotation + 0 + + + id + blocks_repack_bits_bb_0_0 + + + len_tag_key + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + align_output + False + + + + blocks_repack_bits_bb + + k + 1 + + + l + 8 + + + alias + + + + comment + Tagged Stream to PDU block up ahead needs packed bytes + + + affinity + + + + _enabled + 1 + + + endianness + gr.GR_MSB_FIRST + + + _coordinate + (456, 260) + + + _rotation + 0 + + + id + blocks_repack_bits_bb_0_0_0 + + + len_tag_key + len_key2 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + align_output + False + + + + blocks_tag_gate + + alias + + + + comment + Simulate the "channel'... removes tags + + + affinity + + + + _enabled + 1 + + + _coordinate + (216, 204) + + + _rotation + 0 + + + id + blocks_tag_gate_0_0 + + + type + byte + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + propagate_tags + False + + + single_key + "" + + + vlen + 1 + + + + blocks_tagged_stream_mux + + alias + + + + comment + Append header to payload + + + affinity + + + + _enabled + True + + + _coordinate + (608, 80) + + + _rotation + 0 + + + id + blocks_tagged_stream_mux_0 + + + type + byte + + + lengthtagname + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + ninputs + 2 + + + tag_preserve_head_pos + 0 + + + vlen + 1 + + + + blocks_tagged_stream_to_pdu + + alias + + + + comment + + + + affinity + + + + _enabled + 1 + + + _coordinate + (680, 268) + + + _rotation + 0 + + + id + blocks_tagged_stream_to_pdu_0 + + + type + byte + + + tag + len_key2 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + + blocks_throttle + + alias + + + + comment + Slow things down :) + + + affinity + + + + _enabled + True + + + _coordinate + (728, 4) + + + _rotation + 0 + + + id + blocks_throttle_0 + + + ignoretag + True + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + samples_per_second + samp_rate + + + type + byte + + + vlen + 1 + + + + digital_chunks_to_symbols_xx + + alias + + + + comment + To display in time domain we map individual +bits to (complex) symbols + + + affinity + + + + dimension + 1 + + + _enabled + 1 + + + _coordinate + (504, 356) + + + _rotation + 0 + + + id + digital_chunks_to_symbols_xx_0_0_0 + + + in_type + byte + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + num_ports + 1 + + + out_type + complex + + + symbol_table + hdr_const.points() + + + + digital_correlate_access_code_xx_ts + + access_code + digital.packet_utils.default_access_code + + + alias + + + + comment + Correlates with access code to extract the payload. 
+ + + affinity + + + + _enabled + 1 + + + _coordinate + (208, 320) + + + _rotation + 0 + + + id + digital_correlate_access_code_xx_ts_1 + + + type + byte + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + tagname + len_key2 + + + threshold + 0 + + + + digital_protocol_formatter_bb + + alias + + + + comment + Generate header + + + affinity + + + + _enabled + True + + + format + hdr_format + + + _coordinate + (464, 4) + + + _rotation + 0 + + + id + digital_protocol_formatter_bb_0 + + + len_tag_key + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + + parameter + + alias + + + + comment + + + + _enabled + 1 + + + _coordinate + (8, 184) + + + _rotation + 0 + + + id + hdr_const + + + label + Header constellation + + + short_id + + + + hide + none + + + type + + + + value + digital.constellation_calcdist((digital.psk_2()[0]), (digital.psk_2()[1]), 2, 1).base() + + + + parameter + + alias + + + + comment + + + + _enabled + 1 + + + _coordinate + (8, 248) + + + _rotation + 0 + + + id + hdr_format + + + label + Header Formatter + + + short_id + + + + hide + none + + + type + + + + value + digital.header_format_default(digital.packet_utils.default_access_code, 0) + + + + import + + alias + + + + comment + + + + _enabled + True + + + _coordinate + (8, 148) + + + _rotation + 0 + + + id + import_0 + + + import + import numpy + + + + qtgui_time_sink_x + + autoscale + True + + + axislabels + True + + + alias + + + + comment + Display payloud in time domain + + + ctrlpanel + False + + + affinity + + + + entags + True + + + _enabled + 1 + + + _coordinate + (760, 352) + + + gui_hint + + + + _rotation + 0 + + + grid + False + + + id + qtgui_time_sink_x_0_1_0 + + + legend + True + + + alpha1 + 1.0 + + + color1 + "blue" + + + label1 + + + + marker1 + -1 + + + style1 + 1 + + + width1 + 1 + + + alpha10 + 1.0 + + + color10 + "blue" + + + label10 + + + + marker10 + -1 + + + style10 + 1 + + + width10 + 1 + + + alpha2 + 1.0 + + + color2 + "red" + + + label2 + + + + marker2 + -1 + + + style2 + 1 + + + width2 + 1 + + + alpha3 + 1.0 + + + color3 + "green" + + + label3 + + + + marker3 + -1 + + + style3 + 1 + + + width3 + 1 + + + alpha4 + 1.0 + + + color4 + "black" + + + label4 + + + + marker4 + -1 + + + style4 + 1 + + + width4 + 1 + + + alpha5 + 1.0 + + + color5 + "cyan" + + + label5 + + + + marker5 + -1 + + + style5 + 1 + + + width5 + 1 + + + alpha6 + 1.0 + + + color6 + "magenta" + + + label6 + + + + marker6 + -1 + + + style6 + 1 + + + width6 + 1 + + + alpha7 + 1.0 + + + color7 + "yellow" + + + label7 + + + + marker7 + -1 + + + style7 + 1 + + + width7 + 1 + + + alpha8 + 1.0 + + + color8 + "dark red" + + + label8 + + + + marker8 + -1 + + + style8 + 1 + + + width8 + 1 + + + alpha9 + 1.0 + + + color9 + "dark green" + + + label9 + + + + marker9 + -1 + + + style9 + 1 + + + width9 + 1 + + + name + "" + + + nconnections + 1 + + + size + 1024 + + + srate + samp_rate + + + stemplot + False + + + tr_chan + 0 + + + tr_delay + 0 + + + tr_level + 0.0 + + + tr_mode + qtgui.TRIG_MODE_FREE + + + tr_slope + qtgui.TRIG_SLOPE_POS + + + tr_tag + "" + + + type + complex + + + update_time + 0.10 + + + ylabel + Amplitude + + + yunit + "" + + + ymax + 1 + + + ymin + -1 + + + + blocks_message_strobe_0_0 + blocks_pdu_to_tagged_stream_1 + strobe + pdus + + + blocks_pdu_to_tagged_stream_1 + blocks_tagged_stream_mux_0 + 0 + 1 + + + blocks_pdu_to_tagged_stream_1 + digital_protocol_formatter_bb_0 + 0 + 0 + + + blocks_repack_bits_bb_0_0 + blocks_tag_gate_0_0 + 0 + 0 + + + blocks_repack_bits_bb_0_0_0 + blocks_tagged_stream_to_pdu_0 + 0 + 0 + + + 
blocks_tag_gate_0_0 + digital_correlate_access_code_xx_ts_1 + 0 + 0 + + + blocks_tagged_stream_mux_0 + blocks_throttle_0 + 0 + 0 + + + blocks_tagged_stream_to_pdu_0 + blocks_message_debug_0 + pdus + print + + + blocks_throttle_0 + blocks_repack_bits_bb_0_0 + 0 + 0 + + + digital_chunks_to_symbols_xx_0_0_0 + qtgui_time_sink_x_0_1_0 + 0 + 0 + + + digital_correlate_access_code_xx_ts_1 + blocks_repack_bits_bb_0_0_0 + 0 + 0 + + + digital_correlate_access_code_xx_ts_1 + digital_chunks_to_symbols_xx_0_0_0 + 0 + 0 + + + digital_protocol_formatter_bb_0 + blocks_tagged_stream_mux_0 + 0 + 0 + + diff --git a/examples/HackRF_RTLSDR_loopback_test.grc b/examples/HackRF_RTLSDR_loopback_test.grc new file mode 100644 index 0000000..8e3ff2a --- /dev/null +++ b/examples/HackRF_RTLSDR_loopback_test.grc @@ -0,0 +1,5622 @@ + + + + Sun Sep 4 20:59:56 2016 + + options + + author + Aaron Scher + + + window_size + 2000,2000 + + + category + [GRC Hier Blocks] + + + comment + + + + description + Transmit and receive example + + + _enabled + True + + + _coordinate + (8, 8) + + + _rotation + 0 + + + generate_options + qt_gui + + + hier_block_src_path + .: + + + id + top_block + + + max_nouts + 0 + + + qt_qss_theme + + + + realtime_scheduling + + + + run_command + {python} -u {filename} + + + run_options + prompt + + + run + True + + + sizing_mode + fixed + + + thread_safe_setters + + + + title + HackRF - RTLSDR loop back test + + + placement + (0,0) + + + + variable + + comment + + + + _enabled + True + + + _coordinate + (872, 4) + + + _rotation + 0 + + + id + EBW + + + value + .35 + + + + variable_rrc_filter_taps + + comment + Root raised cosine definition. + + + _enabled + 1 + + + alpha + EBW + + + _coordinate + (760, 4) + + + _rotation + 0 + + + gain + nfilts + + + id + RRC_filter_taps + + + ntaps + 5*sps_TX*nfilts + + + samp_rate + nfilts + + + sym_rate + 1.0 + + + + variable + + comment + RF carrier frequency + + + _enabled + True + + + _coordinate + (472, 4) + + + _rotation + 0 + + + id + center_freq + + + value + 430E6 + + + + variable + + comment + Digitally upcovert transmitted +signal by this value to avoid +DC spurs, etc. +At the receiver end, we will +need to downconvert signal +by this value. + + + _enabled + True + + + _coordinate + (312, 4) + + + _rotation + 0 + + + id + freq_offset_value + + + value + 30E3 + + + + variable + + comment + + + + _enabled + True + + + _coordinate + (872, 60) + + + _rotation + 0 + + + id + nfilts + + + value + 32 + + + + variable + + comment + Sample rate - +determines BW +of transmitter and receiver. +This should be larger +than BW of signal itself. 
+ + + _enabled + True + + + _coordinate + (160, 4) + + + _rotation + 0 + + + id + samp_rate + + + value + 1e6 + + + + variable + + comment + Samples per +signal at +receiver + + + _enabled + True + + + _coordinate + (88, 108) + + + _rotation + 0 + + + id + sps_RX + + + value + 40/10 + + + + variable + + comment + Samples +per signal +at transmitter + + + _enabled + True + + + _coordinate + (8, 108) + + + _rotation + 0 + + + id + sps_TX + + + value + 40 + + + + analog_feedforward_agc_cc + + alias + + + + comment + Optional AGC + + + affinity + + + + _enabled + 2 + + + _coordinate + (432, 972) + + + _rotation + 0 + + + id + analog_feedforward_agc_cc_0 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + num_samples + 1024/2 + + + reference + 1.0 + + + + analog_pwr_squelch_xx + + alpha + .01 + + + alias + + + + comment + Optional power squelch + + + affinity + + + + _enabled + 2 + + + _coordinate + (224, 888) + + + _rotation + 0 + + + gate + True + + + id + analog_pwr_squelch_xx_0 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + ramp + 0 + + + threshold + -20 + + + type + complex + + + + analog_sig_source_x + + amp + 1 + + + alias + + + + comment + Local oscillator + + + affinity + + + + _enabled + 1 + + + freq + freq_offset_value + + + _coordinate + (840, 632) + + + _rotation + 90 + + + id + analog_sig_source_x_0 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + offset + 0 + + + type + complex + + + samp_rate + samp_rate + + + waveform + analog.GR_COS_WAVE + + + + analog_sig_source_x + + amp + 1 + + + alias + + + + comment + local oscillator + + + affinity + + + + _enabled + True + + + freq + -freq_offset_value + + + _coordinate + (24, 1132) + + + _rotation + 0 + + + id + analog_sig_source_x_1 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + offset + 0 + + + type + complex + + + samp_rate + samp_rate + + + waveform + analog.GR_COS_WAVE + + + + blocks_message_debug + + alias + + + + comment + Display recovered +message + + + affinity + + + + _enabled + 1 + + + _coordinate + (1008, 1160) + + + _rotation + 0 + + + id + blocks_message_debug_0 + + + + blocks_message_strobe + + alias + + + + comment + Generate hello world message + + + affinity + + + + _enabled + 0 + + + _coordinate + (24, 276) + + + _rotation + 0 + + + id + blocks_message_strobe_0_0 + + + maxoutbuf + 0 + + + msg + pmt.cons(pmt.make_dict(), pmt.pmt_to_python.numpy_to_uvector(numpy.array([ord(c) for c in "Hello world!"], numpy.uint8))) + + + minoutbuf + 0 + + + period + 500 + + + + blocks_multiply_const_vxx + + alias + + + + comment + + + + const + 0.5 + + + affinity + + + + _enabled + 1 + + + _coordinate + (640, 564) + + + _rotation + 0 + + + id + blocks_multiply_const_vxx_0 + + + type + complex + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + vlen + 1 + + + + blocks_multiply_xx + + alias + + + + comment + Perform frequency tranlation +(up convert) + + + affinity + + + + _enabled + 1 + + + _coordinate + (952, 472) + + + _rotation + 0 + + + id + blocks_multiply_xx_0 + + + type + complex + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + num_inputs + 2 + + + vlen + 1 + + + + blocks_multiply_xx + + alias + + + + comment + Perform frequency translation +(down convert) + + + affinity + + + + _enabled + True + + + _coordinate + (216, 1128) + + + _rotation + 0 + + + id + blocks_multiply_xx_1 + + + type + complex + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + num_inputs + 2 + + + vlen + 1 + + + + blocks_pdu_to_tagged_stream + + alias + + + + comment + + + + affinity + + + + _enabled + 1 + + + _coordinate + (120, 620) + + + _rotation + 0 + + + id + 
blocks_pdu_to_tagged_stream_1 + + + type + byte + + + tag + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + + blocks_repack_bits_bb + + k + 8 + + + l + my_const.bits_per_symbol() + + + alias + + + + comment + Unpack bytes + + + affinity + + + + _enabled + 1 + + + endianness + gr.GR_MSB_FIRST + + + _coordinate + (432, 452) + + + _rotation + 0 + + + id + blocks_repack_bits_bb_0_0 + + + len_tag_key + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + align_output + False + + + + blocks_repack_bits_bb + + k + 1 + + + l + 8 + + + alias + + + + comment + Tagged Stream to PDU block up ahead needs +packed bytes + + + affinity + + + + _enabled + 1 + + + endianness + gr.GR_MSB_FIRST + + + _coordinate + (872, 972) + + + _rotation + 0 + + + id + blocks_repack_bits_bb_0_0_0 + + + len_tag_key + len_key2 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + align_output + False + + + + blocks_tagged_stream_multiply_length + + alias + + + + comment + Fix length tag to reflect the resampling + + + affinity + + + + _enabled + 1 + + + _coordinate + (576, 624) + + + _rotation + 0 + + + id + blocks_tagged_stream_multiply_length_0 + + + type + complex + + + c + sps_TX + + + lengthtagname + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + vlen + 1 + + + + blocks_tagged_stream_mux + + alias + + + + comment + Append header to payload + + + affinity + + + + _enabled + 1 + + + _coordinate + (352, 552) + + + _rotation + 0 + + + id + blocks_tagged_stream_mux_0 + + + type + byte + + + lengthtagname + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + ninputs + 2 + + + tag_preserve_head_pos + 0 + + + vlen + 1 + + + + blocks_tagged_stream_to_pdu + + alias + + + + comment + + + + affinity + + + + _enabled + 1 + + + _coordinate + (888, 1100) + + + _rotation + 0 + + + id + blocks_tagged_stream_to_pdu_0 + + + type + byte + + + tag + len_key2 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + + digital_binary_slicer_fb + + alias + + + + comment + + + + affinity + + + + _enabled + True + + + _coordinate + (632, 880) + + + _rotation + 270 + + + id + digital_binary_slicer_fb_0 + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + + digital_burst_shaper_xx + + alias + burst_shaper0 + + + comment + Zero pad, etc. + + + affinity + + + + _enabled + 1 + + + _coordinate + (688, 272) + + + _rotation + 0 + + + id + digital_burst_shaper_xx_0 + + + insert_phasing + True + + + length_tag_name + len_key + + + maxoutbuf + 0 + + + minoutbuf + 0 + + + post_padding + 4000 + + + pre_padding + 4000 + + + type + complex + + + window + numpy.ones(500) + + + + digital_chunks_to_symbols_xx + + alias + + + + comment + Binary 1's and 0's to symbols. 
[Remainder of the new example GRC flowgraph: the XML markup was lost in extraction
and only parameter values survive, so the recoverable content is summarized here.

Blocks defined in this span, with their key parameters and GRC comments:
  - digital_chunks_to_symbols_xx_0_0: byte -> complex, symbol_table=my_const.points()
  - digital_cma_equalizer_cc_0 ("Channel equalizer"): num_taps=11, mu=.01, sps=1
  - digital_constellation_soft_decoder_cf_0 ("Decision"): constellation=my_const
  - digital_correlate_access_code_xx_ts_1_0 ("Correlates with access code to
    extract the payload."): access_code=digital.packet_utils.default_access_code,
    threshold=2, tagname=len_key2
  - digital_costas_loop_cc_0 ("Fine carrier frequency and phase sync"): order=2, w=.01
  - digital_crc32_async_bb_0 ("Error detection"): check=True
  - digital_crc32_async_bb_1 ("Apply error correction"): check=False
  - digital_diff_decoder_bb_0 / digital_diff_encoder_bb_0: modulus=2
  - digital_fll_band_edge_cc_0 ("Carrier frequency sync"): samps_per_sym=sps_RX,
    rolloff=EBW, filter_size=45, w=.02
  - digital_pfb_clock_sync_xxx_0 ("Timing recovery (sync)"): sps=sps_RX,
    taps=RRC_filter_taps, filter_size=nfilts, init_phase=nfilts/2,
    loop_bw=6.28/400.0*2
  - digital_protocol_formatter_bb_0 ("Generate header"): format=hdr_format,
    len_tag_key=len_key
  - epy_block_0 (Embedded Python Block, "Convert strings from previous block to
    uint8 vectors"); its source is identical to the new examples/epy_block_0.py below
  - parameter hdr_format ("Header format definition") =
    digital.header_format_default(digital.packet_utils.default_access_code, 0)
  - import_0: import numpy ("Custom Embedded Python Block uses this library.")
  - parameter my_const ("Constellation definition") =
    digital.constellation_calcdist((digital.psk_2()[0]), (digital.psk_2()[1]), 2, 1).base()
  - osmosdr_sink_0_0: freq0=center_freq, gain0=30, if_gain0=10, bb_gain0=20,
    sample_rate=samp_rate
  - pfb_arb_resampler_xxx_0 ("Resample. Apply RRC filter"): rrate=sps_TX,
    taps=RRC_filter_taps, nfilts=nfilts, atten=100
  - pluto_source_0: frequency=center_freq-40E3, manual_gain=60.0,
    bandwidth=1000000, samplerate=samp_rate
  - qtgui_edit_box_msg_0: type=string, is_pair=False
  - qtgui_freq_sink_x_0: bw=samp_rate/int(sps_TX/sps_RX), fftsize=1024
  - qtgui_time_sink_x_0: srate=samp_rate, size=1024*256, tr_tag="len_key"
  - rational_resampler_xxx_0 ("Downsample and LPF"): decim=int(sps_TX/sps_RX), interp=1
  - rtlsdr_source_1 (disabled alternative receiver): freq0=center_freq-40E3, gain0=40

Connections:
  TX: qtgui_edit_box_msg_0 -> epy_block_0 -> digital_crc32_async_bb_1 ->
  blocks_pdu_to_tagged_stream_1 -> digital_protocol_formatter_bb_0 ->
  blocks_tagged_stream_mux_0 (payload on port 1) -> blocks_repack_bits_bb_0_0 ->
  digital_diff_encoder_bb_0 -> digital_chunks_to_symbols_xx_0_0 ->
  digital_burst_shaper_xx_0 -> pfb_arb_resampler_xxx_0 ->
  blocks_tagged_stream_multiply_length_0 -> blocks_multiply_const_vxx_0 ->
  blocks_multiply_xx_0 (mixed with analog_sig_source_x_0) -> osmosdr_sink_0_0;
  blocks_multiply_const_vxx_0 also feeds qtgui_time_sink_x_0.
  RX: pluto_source_0 -> blocks_multiply_xx_1 (mixed with analog_sig_source_x_1) ->
  rational_resampler_xxx_0 -> analog_pwr_squelch_xx_0 -> digital_fll_band_edge_cc_0 ->
  analog_feedforward_agc_cc_0 -> digital_pfb_clock_sync_xxx_0 ->
  digital_costas_loop_cc_0 -> digital_cma_equalizer_cc_0 ->
  digital_constellation_soft_decoder_cf_0 -> digital_binary_slicer_fb_0 ->
  digital_diff_decoder_bb_0 -> digital_correlate_access_code_xx_ts_1_0 ->
  blocks_repack_bits_bb_0_0_0 -> blocks_tagged_stream_to_pdu_0 ->
  digital_crc32_async_bb_0 -> blocks_message_debug_0 (print);
  analog_pwr_squelch_xx_0 also feeds qtgui_freq_sink_x_0.]
diff --git a/examples/epy_block_0.py b/examples/epy_block_0.py
new file mode 100644
index 0000000..6e9ae11
--- /dev/null
+++ b/examples/epy_block_0.py
@@ -0,0 +1,38 @@
+"""
+Embedded Python Blocks:
+
+Each time this file is saved, GRC will instantiate the first class it finds
+to get ports and parameters of your block. The arguments to __init__ will
+be the parameters. All of them are required to have default values!
+""" + +import numpy as np +import pylab +from gnuradio import gr +import pmt + + +class msg_block(gr.basic_block): # other base classes are basic_block, decim_block, interp_block + """Convert strings to uint8 vectors""" + + def __init__(self): # only default arguments here + """arguments to this function show up as parameters in GRC""" + gr.basic_block.__init__( + self, + name='Embedded Python Block', # will show up in GRC + in_sig=None, + out_sig=None + ) + self.message_port_register_out(pmt.intern('msg_out')) + self.message_port_register_in(pmt.intern('msg_in')) + self.set_msg_handler(pmt.intern('msg_in'), self.handle_msg) + + def handle_msg(self, msg): + + nvec = pmt.to_python(msg) + + self.message_port_pub(pmt.intern('msg_out'), pmt.cons(pmt.make_dict(), pmt.pmt_to_python.numpy_to_uvector(np.array([ord(c) for c in nvec], np.uint8)))) + + + def work(self, input_items, output_items): + pass diff --git a/examples/fmmod.dat b/examples/fmmod.dat new file mode 100644 index 0000000000000000000000000000000000000000..a1b19dd1d270e87b7975f22456b4ec91d27e5ff2 GIT binary patch literal 640512 zcmeFZeOS!f_cuNfLK31RBq0eSmC)=p2&oX2lFCR@sf-?{#~IIg?hry4PMi=r9y+OJ zukjq>gbs1SAan>Lp+mS=pN~1e?{#1I_5I!d-rwu_7tv@jR^Pp z5B?wj{14&B>>vEr+;W5_M+f}+Sm)SF7swt1GDQ_eYQ8mqhokW?*4vKm4|1 z5>8&0ftJT=R5D*T^qJQeBnKN*P-cN_q9{TL_BM%6_ZH@-khgn|S z64c2$T-RPq`r}Ez#ojEkr?a0^+z;QxPC~_p8>*nf4AjXwT-RQwA4t#Fr{`DD^AqX$ zzm#}@TE7=wht6zUYC1aqZibrqQ}L)tI%Q^M=`lWFLDxd$kE4d6is)1Z2m3Ap5WL7p%T#I1yE!q}N$X3`aNo{nKYCsVP! zqBE9U^TzSRQc;Q%d4sPn3YT^Q=~gdZy)OmQMs$QH_hx~aQ3{M-Fo(s;{6TWa0M8D- z&DJ|b;95U_D1KmouRqAaV(%uGL??gHc4?2%!@}6}*ksIK*%p`Gn2p-sWmu?{^3QAz z<_y*c?U7VoI6wx0*?KUsJP@87SpdUro-m!P!#`5{+?Mt~^jqw$B6~Sxzn1K0(ev}^ z`KHvrnbg0)1sx%uY{>UUL1sxir63G~Yn;-7^1yH2*q& z94G#$i9cfEQ#JAF6!B|2@ymFt2UI)u!ZV?AP(NI+>iR?ezvZ8fk2-#;1wFBjO+~@3 zXjS@8a@5H>yrsP>@mE9qZA1JuA^wVg@K;UzJw^O=A^v(%|AVRjGHVlPxaKg9qm zJi~Od4mT!!H`4##$Cpdv%c1e@rFbf*c)CIHi32!p*!@N6AS4t^O%(M-?!-3?-xZ2<=`IH8@fK50>6{}wq##J z`6H9^M{)nd>;&Zxovg!Ur2mHW1wWqePx(Ve`NM{c4&@K^#~Uht${#ve zhqts(qxQkiA><{NLR5*%}&e9gWwJ#=o1!FQNIq&@$gN|6-bd0r|_H z{G~M?z(dHtO~jW0#FxXwpL2RbHiGyhBtG>ZegzP}>WFWLiEnPiUoD@=A5wnsr2Jq^ z{C-6FA&2;Wllb0;;_pJ}9F{`yRM`?wL&Mk`iqCS2*I0_zCW>E6ivO1s&!;G!B^3W> zDE=>8(Suj{f#4LM1S^LxXDNokU|I2vC$7w7F{O#9ZT_2&kC=-OKPRC0?gs9L5)=&l zz{;(+aEp-%VBn@@g+?J@7a9-kVscoz-+VBh|BCYyyP4U>IMgIQ=TSRCF`zIO^BeZ_ z;Hm|fv-~kD9GcJLuPuSfH8pI#!9qxVB!!FfkFrmW;hc{eFdEmNjYOeS1s{4c1|@CIutL|H{PfQeVEN}sHs<(ZXeSScZd32D33*ab9y!J> zrvJ&-doM(7;vqhM^Aa@a7lulA4KI+zqQ)hUiQySv{vs62zU^W4esM7Qm-*0V=u2iK zj0a`O4sK%dmPsTbSQwkl4fGQ5RM&aL@Ao|VY63P5*u?xRnz$jK3&KCvu=w?fa6cp% z=I#EPK{Nu4MQPc=z@xs zomKgF<>)qT3|4I(j#c%vPTGIT#bwI~^a^tZL1dtd!Dl(_mpOs?#*wi1JLy{vD=l2^ z2>B}=@b#=VF28-Hwb2H9Y>FF&UX7%0v8${)cr>av2|=;?eVM^iITWlP2CpZKg^pL{ zFzLepQF?z7C|1~E-+nViTMo(5Ims3kUm4~urT)8Rs06pf_$Jf_1P)tOni=$-oH`KZ zD#t^xxg3tLBcfIjuAmiKV^7z!q6L3a9_nm`&xcIHhU^SfyWUk9{xum7{?!-M?6vCt zB=R4pJhXQT)N9irB}b24^zs0MO}(&igCX0rB^@gRdSc-hPdqm)9WCXiJWn|d4XsSU zGRlg_pG$*NwI(nUykPvaG`Q?Nl%?IB0oD7v;#enVR(LBFivx{ue}p&wHa!&!yG`JG zKKP(lS0j+Tn#vbkOo6=f9id0+EEqg81^mkVS=-NkP%Lea)vM;Soja0I-LoAYjGc|e zO)?aC#PEjN0KCBU!OI|-ulJHc_1spl(<>16DU+b&;Zl}%Ita9Xf9Da7tC{4lM9i}L z!XI{r@mwF%jUt;?eXBzZ#(PRZaxTe>iMIv zT;_g04hxUI;JNOhxavhL8t3oh@v9f0SJ@LLt3AjM=*NQpg$Hb%Dhw8yFM*O?1uUR` zAxJM)^P^LLWz{xP%00Jvut@~E2p3~kN)g}g7>W6(ZZP5gQod6d12#WjVQ;1{g37_s z(E5U!Jr9WjuXPvry+;?>xn5C7DTr%gqEX#;5gLEI%3sCBpuc+wb8B14YhFe`e!tVK zX$j?_@^ElB{)6odmxAQ|alWP7eb%^pAy(pH-obwfT0~PGGJeEk#Ib0z`T(>RK1nAx)G023+AC(-N2{sNI*?| z7PHy)iRX8ffOx<<78aBU{%3+g7}LxQ&67ZSb0y!OD*(gUL70`S;1<6ppM75 
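The flowgraph summarized above also references several top-level variables
(samp_rate, center_freq, sps_TX, sps_RX, nfilts, EBW, RRC_filter_taps, len_key)
whose definitions are not visible in this excerpt. Below is a rough sketch of
how such variables are commonly defined in a GRC-generated top block; every
numeric value is an assumption, and only the hdr_format and my_const
expressions are taken verbatim from the recovered flowgraph parameters.

    from gnuradio import digital
    from gnuradio.filter import firdes

    samp_rate = 1e6          # assumed sample rate
    center_freq = 434e6      # assumed RF center frequency
    sps_TX, sps_RX = 128, 4  # assumed samples per symbol (TX / RX)
    nfilts = 32              # assumed polyphase filterbank size
    EBW = 0.35               # assumed RRC excess bandwidth
    len_key = "packet_len"   # assumed length-tag name

    # One common way to build the shared root-raised-cosine taps used by the
    # TX arbitrary resampler and the RX polyphase clock sync.
    RRC_filter_taps = firdes.root_raised_cosine(
        nfilts, nfilts, 1.0 / float(sps_RX), EBW, 11 * sps_RX * nfilts)

    # Taken verbatim from the flowgraph parameter blocks.
    hdr_format = digital.header_format_default(
        digital.packet_utils.default_access_code, 0)
    my_const = digital.constellation_calcdist(
        (digital.psk_2()[0]), (digital.psk_2()[1]), 2, 1).base()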