From 9f201e23e054d988bc06ed322f1bdc8ad4ce6bf4 Mon Sep 17 00:00:00 2001 From: John Wiggins Date: Thu, 18 Mar 2021 15:11:15 +0100 Subject: [PATCH] Create an independent renderer for draw_marker_at_points (#724) --- .gitignore | 1 + MANIFEST.in | 2 + kiva/_marker_renderer.pxd | 44 + kiva/_marker_renderer.pyx | 138 + kiva/api.py | 7 + kiva/celiagg.py | 36 +- kiva/marker_renderer.py | 53 + kiva/markers/LICENSES/LICENSE_Agg | 65 + kiva/markers/agg/agg_array.h | 1119 ++++++++ kiva/markers/agg/agg_basics.h | 553 ++++ kiva/markers/agg/agg_color_gray.h | 1047 ++++++++ kiva/markers/agg/agg_color_rgba.h | 1353 ++++++++++ kiva/markers/agg/agg_config.h | 44 + kiva/markers/agg/agg_dda_line.h | 290 ++ kiva/markers/agg/agg_ellipse_bresenham.h | 113 + kiva/markers/agg/agg_gamma_functions.h | 132 + kiva/markers/agg/agg_gamma_lut.h | 305 +++ kiva/markers/agg/agg_pixfmt_base.h | 97 + kiva/markers/agg/agg_pixfmt_rgb.h | 995 +++++++ kiva/markers/agg/agg_pixfmt_rgba.h | 2803 ++++++++++++++++++++ kiva/markers/agg/agg_renderer_base.h | 731 +++++ kiva/markers/agg/agg_renderer_markers.h | 711 +++++ kiva/markers/agg/agg_renderer_primitives.h | 224 ++ kiva/markers/agg/agg_rendering_buffer.h | 300 +++ kiva/markers/agg/agg_trans_affine.h | 518 ++++ kiva/markers/marker_renderer.h | 130 + kiva/tests/test_marker_rendering.py | 142 + setup.py | 16 + 28 files changed, 11967 insertions(+), 2 deletions(-) create mode 100644 kiva/_marker_renderer.pxd create mode 100644 kiva/_marker_renderer.pyx create mode 100644 kiva/marker_renderer.py create mode 100644 kiva/markers/LICENSES/LICENSE_Agg create mode 100644 kiva/markers/agg/agg_array.h create mode 100644 kiva/markers/agg/agg_basics.h create mode 100644 kiva/markers/agg/agg_color_gray.h create mode 100644 kiva/markers/agg/agg_color_rgba.h create mode 100644 kiva/markers/agg/agg_config.h create mode 100644 kiva/markers/agg/agg_dda_line.h create mode 100644 kiva/markers/agg/agg_ellipse_bresenham.h create mode 100644 kiva/markers/agg/agg_gamma_functions.h create mode 100644 kiva/markers/agg/agg_gamma_lut.h create mode 100644 kiva/markers/agg/agg_pixfmt_base.h create mode 100644 kiva/markers/agg/agg_pixfmt_rgb.h create mode 100644 kiva/markers/agg/agg_pixfmt_rgba.h create mode 100644 kiva/markers/agg/agg_renderer_base.h create mode 100644 kiva/markers/agg/agg_renderer_markers.h create mode 100644 kiva/markers/agg/agg_renderer_primitives.h create mode 100644 kiva/markers/agg/agg_rendering_buffer.h create mode 100644 kiva/markers/agg/agg_trans_affine.h create mode 100644 kiva/markers/marker_renderer.h create mode 100644 kiva/tests/test_marker_rendering.py diff --git a/.gitignore b/.gitignore index 74c12bc4b..1ccf897c2 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ build/ dist/ # SWIG & Cython intermediate files +kiva/_marker_renderer.cpp kiva/agg/agg.py kiva/agg/agg_wrap.cpp kiva/agg/plat_support.py diff --git a/MANIFEST.in b/MANIFEST.in index ec452f481..949dee0d4 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,6 +9,7 @@ include docs/Makefile include docs/kiva/agg/notes include kiva/_cython_speedups.* include kiva/_hit_test.* +include kiva/_marker_renderer.* recursive-include docs *.py *.rst *.txt *.css *.png *.ico *.doc recursive-include enable/examples *.py *.svg *.jpg *.enaml recursive-include kiva/examples *.py *.txt *.gif *.jpg @@ -19,4 +20,5 @@ recursive-include kiva/agg/LICENSES * recursive-include kiva/fonttools/tests/data *.ttc *.ttf *.afm recursive-include kiva/fonttools/LICENSES * recursive-include kiva/gl *.h *.cpp *.i LICENSE_* +recursive-include kiva/markers 
*.h LICENSE_* recursive-include kiva/quartz *.pyx *.pxi *.pxd mac_context*.* diff --git a/kiva/_marker_renderer.pxd b/kiva/_marker_renderer.pxd new file mode 100644 index 000000000..ceadfe806 --- /dev/null +++ b/kiva/_marker_renderer.pxd @@ -0,0 +1,44 @@ +# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX +# All rights reserved. +# +# This software is provided without warranty under the terms of the BSD +# license included in LICENSE.txt and may be redistributed only under +# the conditions described in the aforementioned license. The license +# is also available online at http://www.enthought.com/licenses/BSD.txt +# +# Thanks for using Enthought open source! +from libcpp cimport bool + +cdef extern from "marker_renderer.h" namespace "agg24markers": + cdef cppclass pixfmt_abgr32: + pass + cdef cppclass pixfmt_argb32: + pass + cdef cppclass pixfmt_bgra32: + pass + cdef cppclass pixfmt_rgba32: + pass + cdef cppclass pixfmt_bgr24: + pass + cdef cppclass pixfmt_rgb24: + pass + + +cdef extern from "marker_renderer.h" namespace "kiva_markers": + # This is just here for the type signature + cdef enum marker_type: + pass + + # Abstract base class + cdef cppclass marker_renderer_base: + bool draw_markers(double* pts, unsigned Npts, + unsigned size, marker_type marker, + double* fill, double* stroke) + void transform(double sx, double sy, + double shx, double shy, + double tx, double ty) + + # Template class + cdef cppclass marker_renderer[pixfmt_T]: + marker_renderer(unsigned char* buf, unsigned width, unsigned height, + int stride, bool bottom_up) diff --git a/kiva/_marker_renderer.pyx b/kiva/_marker_renderer.pyx new file mode 100644 index 000000000..c2844692c --- /dev/null +++ b/kiva/_marker_renderer.pyx @@ -0,0 +1,138 @@ +# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX +# All rights reserved. +# +# This software is provided without warranty under the terms of the BSD +# license included in LICENSE.txt and may be redistributed only under +# the conditions described in the aforementioned license. The license +# is also available online at http://www.enthought.com/licenses/BSD.txt +# +# Thanks for using Enthought open source! +import cython +import numpy as np +from numpy cimport uint8_t + +cimport _marker_renderer + +ctypedef _marker_renderer.marker_renderer_base renderer_base_t + +@cython.internal +cdef class MarkerRendererBase: + cdef renderer_base_t* _this + cdef object py_array + + def __dealloc__(self): + del self._this + + cdef int base_init(self, image) except -1: + if image is None: + raise ValueError('image argument must not be None.') + + # Retain a reference to the memory view supplied to the constructor + # so that it lives as long as this object + self.py_array = image + + def draw_markers(self, points, size, marker, fill, stroke): + """draw_markers(points, size, marker, fill, stroke) + Draw markers at a collection of points. 
+ + :param points: An Nx2 iterable of (x, y) points for marker positions + :param size: An integer pixel size for each marker + :param marker: A Kiva marker enum integer + :param fill: Fill color given as an iterable of 4 numbers (R, G, B, A) + :param stroke: Line color given as an iterable of 4 numbers (R, G, B, A) + + :returns: True if any markers were drawn, False otherwise + """ + cdef: + double[:,::1] _points = np.asarray(points, dtype=np.float64, order='c') + double[::1] _fill = np.asarray(fill, dtype=np.float64, order='c') + double[::1] _stroke = np.asarray(stroke, dtype=np.float64, order='c') + unsigned _size = size + _marker_renderer.marker_type _marker = <_marker_renderer.marker_type>marker + + if _points.shape[1] != 2: + msg = "points argument must be an iterable of (x, y) pairs." + raise ValueError(msg) + if _stroke.shape[0] != 4: + msg = "stroke argument must be an iterable of 4 numbers." + raise ValueError(msg) + if _fill.shape[0] != 4: + msg = "fill argument must be an iterable of 4 numbers." + raise ValueError(msg) + + return self._this.draw_markers( + &_points[0][0], _points.shape[0], _size, _marker, + &_fill[0], &_stroke[0] + ) + + def transform(self, sx, sy, shx, shy, tx, ty): + """transform(sx, sy, shx, shy, tx, ty) + Set the transform to be applied to the marker points and size. + + :param sx: Scale in X + :param sy: Scale in Y + :param shx: Shear in X + :param shy: Shear in Y + :param tx: Translation in X + :param ty: Translation in Y + """ + cdef: + double _sx = sx + double _sy = sy + double _shx = shx + double _shy = shy + double _tx = tx + double _ty = ty + + self._this.transform(_sx, _sy, _shx, _shy, _tx, _ty) + + +# Template specializations +ctypedef _marker_renderer.marker_renderer[_marker_renderer.pixfmt_abgr32] renderer_abgr32_t +ctypedef _marker_renderer.marker_renderer[_marker_renderer.pixfmt_argb32] renderer_argb32_t +ctypedef _marker_renderer.marker_renderer[_marker_renderer.pixfmt_bgra32] renderer_bgra32_t +ctypedef _marker_renderer.marker_renderer[_marker_renderer.pixfmt_rgba32] renderer_rgba32_t +ctypedef _marker_renderer.marker_renderer[_marker_renderer.pixfmt_bgr24] renderer_bgr24_t +ctypedef _marker_renderer.marker_renderer[_marker_renderer.pixfmt_rgb24] renderer_rgb24_t + +cdef class MarkerRendererABGR32(MarkerRendererBase): + def __cinit__(self, uint8_t[:,:,::1] image, bottom_up=True): + self.base_init(image) + self._this = new renderer_abgr32_t( + &image[0][0][0], image.shape[1], image.shape[0], image.strides[0], bottom_up + ) + +cdef class MarkerRendererARGB32(MarkerRendererBase): + def __cinit__(self, uint8_t[:,:,::1] image, bottom_up=True): + self.base_init(image) + self._this = new renderer_argb32_t( + &image[0][0][0], image.shape[1], image.shape[0], image.strides[0], bottom_up + ) + +cdef class MarkerRendererBGRA32(MarkerRendererBase): + def __cinit__(self, uint8_t[:,:,::1] image, bottom_up=True): + self.base_init(image) + self._this = new renderer_bgra32_t( + &image[0][0][0], image.shape[1], image.shape[0], image.strides[0], bottom_up + ) + +cdef class MarkerRendererRGBA32(MarkerRendererBase): + def __cinit__(self, uint8_t[:,:,::1] image, bottom_up=True): + self.base_init(image) + self._this = new renderer_rgba32_t( + &image[0][0][0], image.shape[1], image.shape[0], image.strides[0], bottom_up + ) + +cdef class MarkerRendererBGR24(MarkerRendererBase): + def __cinit__(self, uint8_t[:,:,::1] image, bottom_up=True): + self.base_init(image) + self._this = new renderer_bgr24_t( + &image[0][0][0], image.shape[1], image.shape[0], image.strides[0], 
bottom_up + ) + +cdef class MarkerRendererRGB24(MarkerRendererBase): + def __cinit__(self, uint8_t[:,:,::1] image, bottom_up=True): + self.base_init(image) + self._this = new renderer_rgb24_t( + &image[0][0][0], image.shape[1], image.shape[0], image.strides[0], bottom_up + ) diff --git a/kiva/api.py b/kiva/api.py index 43688bf68..748616657 100644 --- a/kiva/api.py +++ b/kiva/api.py @@ -93,6 +93,12 @@ - :attr:`~.DOT_MARKER` - :attr:`~.PIXEL_MARKER` +Marker Renderer +=============== +This can be used by Kiva backends to implement :py:meth:`draw_marker_at_points` + +- :class:`~.MarkerRenderer` + Fonts ===== @@ -148,3 +154,4 @@ ) from ._cython_speedups import points_in_polygon from .fonttools import add_application_fonts, Font +from .marker_renderer import MarkerRenderer diff --git a/kiva/celiagg.py b/kiva/celiagg.py index 08a0421ea..9b3b25223 100644 --- a/kiva/celiagg.py +++ b/kiva/celiagg.py @@ -20,6 +20,7 @@ from kiva.abstract_graphics_context import AbstractGraphicsContext import kiva.constants as constants from kiva.fonttools import Font +from kiva.marker_renderer import MarkerRenderer # These are the symbols that a backend has to define. __all__ = ["CompiledPath", "Font", "font_metrics_provider", "GraphicsContext"] @@ -89,8 +90,12 @@ def __init__(self, size, *args, **kwargs): self.pix_format = kwargs.get('pix_format', 'rgba32') shape = (self._height, self._width, 4) + buffer = np.zeros(shape, dtype=np.uint8) canvas_klass = pix_format_canvases[self.pix_format] - self.gc = canvas_klass(np.zeros(shape, dtype=np.uint8), bottom_up=True) + self.gc = canvas_klass(buffer, bottom_up=True) + self.marker_gc = MarkerRenderer( + buffer, pix_format=self.pix_format, bottom_up=True + ) # init the state variables clip = agg.Rect(0, 0, self._width, self._height) @@ -826,6 +831,33 @@ def draw_path_at_points(self, points, path, mode=constants.FILL_STROKE): fill=self.fill_paint, ) + def draw_marker_at_points(self, points_array, size, + marker=constants.SQUARE_MARKER): + """ Draw a marker at a collection of points + """ + # Apply the current transform + ctm = self.transform + self.marker_gc.transform( + ctm.sx, ctm.sy, + ctm.shx, ctm.shy, + ctm.tx, ctm.ty, + ) + + # Grab the fill and stroke colors (where possible) + fill = (0.0, 0.0, 0.0, 0.0) + stroke = (0.0, 0.0, 0.0, 1.0) + if isinstance(self.fill_paint, agg.SolidPaint): + fp = self.fill_paint + fill = (fp.r, fp.g, fp.b, fp.a) + if isinstance(self.stroke_paint, agg.SolidPaint): + sp = self.stroke_paint + stroke = (sp.r, sp.g, sp.b, sp.a) + + # Draw using the marker renderer + return self.marker_gc.draw_markers( + points_array, size, marker, fill, stroke + ) + def save(self, filename, file_format=None, pil_options=None): """ Save the contents of the context to a file """ @@ -841,7 +873,7 @@ def save(self, filename, file_format=None, pil_options=None): os.path.splitext(filename)[1][1:] if isinstance(filename, str) else '' ) - + # Check the output format to see if it can handle an alpha channel. no_alpha_formats = ('jpg', 'bmp', 'eps', 'jpeg') if ext in no_alpha_formats or file_format.lower() in no_alpha_formats: diff --git a/kiva/marker_renderer.py b/kiva/marker_renderer.py new file mode 100644 index 000000000..d02efe2d1 --- /dev/null +++ b/kiva/marker_renderer.py @@ -0,0 +1,53 @@ +# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX +# All rights reserved. 
+# +# This software is provided without warranty under the terms of the BSD +# license included in LICENSE.txt and may be redistributed only under +# the conditions described in the aforementioned license. The license +# is also available online at http://www.enthought.com/licenses/BSD.txt +# +# Thanks for using Enthought open source! +from kiva._marker_renderer import ( + MarkerRendererABGR32, MarkerRendererARGB32, MarkerRendererBGR24, + MarkerRendererBGRA32, MarkerRendererRGB24, MarkerRendererRGBA32, +) + +__all__ = ["MarkerRenderer"] + +_renderers = { + "abgr32": (MarkerRendererABGR32, 4), + "argb32": (MarkerRendererARGB32, 4), + "bgra32": (MarkerRendererBGRA32, 4), + "rgba32": (MarkerRendererRGBA32, 4), + "bgr24": (MarkerRendererBGR24, 3), + "rgb24": (MarkerRendererRGB24, 3), +} + + +def MarkerRenderer(buffer, pix_format="bgra32", bottom_up=True): + """ MarkerRenderer(buffer, pix_format="bgra32", bottom_up=True) + Create a specialized renderer for implementing ``draw_marker_at_points``. + + Parameters + ---------- + buffer : ndarray + A MxNx{3,4} numpy array of uint8 to be used as the backing pixel store + pix_format : str + A string specifying the pixel format. Same as what it passed to + ``GraphicsContext``. + bottom_up : bool [optional, defaults to True] + If True, the origin is bottom-left instead of top-left. + + Returns + ------- + renderer : A new MarkerRenderer instance. + """ + klass, components = _renderers.get(pix_format, (None, 0)) + if klass is None: + raise ValueError(f"{pix_format} is not a supported pixel format") + + if (str(buffer.dtype) != "uint8" or buffer.ndim != 3 + or buffer.shape[2] != components): + raise ValueError(f"Pixel buffer must be MxNx{components} and uint8") + + return klass(buffer, bottom_up=bottom_up) diff --git a/kiva/markers/LICENSES/LICENSE_Agg b/kiva/markers/LICENSES/LICENSE_Agg new file mode 100644 index 000000000..f17681401 --- /dev/null +++ b/kiva/markers/LICENSES/LICENSE_Agg @@ -0,0 +1,65 @@ +The Anti-Grain Geometry Project +A high quality rendering engine for C++ +http://antigrain.com + +Anti-Grain Geometry has dual licensing model. The Modified BSD +License was first added in version v2.4 just for convenience. +It is a simple, permissive non-copyleft free software license, +compatible with the GNU GPL. It's well proven and recognizable. +See http://www.fsf.org/licensing/licenses/index_html#ModifiedBSD +for details. + +Note that the Modified BSD license DOES NOT restrict your rights +if you choose the Anti-Grain Geometry Public License. + + + + +Anti-Grain Geometry Public License +==================================================== + +Anti-Grain Geometry - Version 2.4 +Copyright (C) 2002-2005 Maxim Shemanarev (McSeem) + +Permission to copy, use, modify, sell and distribute this software +is granted provided this copyright notice appears in all copies. +This software is provided "as is" without express or implied +warranty, and with no claim as to its suitability for any purpose. + + + + + +Modified BSD License +==================================================== +Anti-Grain Geometry - Version 2.4 +Copyright (C) 2002-2005 Maxim Shemanarev (McSeem) + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + 3. The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + diff --git a/kiva/markers/agg/agg_array.h b/kiva/markers/agg/agg_array.h new file mode 100644 index 000000000..bd323f48e --- /dev/null +++ b/kiva/markers/agg/agg_array.h @@ -0,0 +1,1119 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +#ifndef AGG_ARRAY_INCLUDED +#define AGG_ARRAY_INCLUDED + +#include +#include +#include "agg_basics.h" + +namespace agg24markers +{ + + //-------------------------------------------------------pod_array_adaptor + template class pod_array_adaptor + { + public: + typedef T value_type; + pod_array_adaptor(T* array, unsigned size) : + m_array(array), m_size(size) {} + + unsigned size() const { return m_size; } + const T& operator [] (unsigned i) const { return m_array[i]; } + T& operator [] (unsigned i) { return m_array[i]; } + const T& at(unsigned i) const { return m_array[i]; } + T& at(unsigned i) { return m_array[i]; } + T value_at(unsigned i) const { return m_array[i]; } + + private: + T* m_array; + unsigned m_size; + }; + + + //---------------------------------------------------------pod_auto_array + template class pod_auto_array + { + public: + typedef T value_type; + typedef pod_auto_array self_type; + + pod_auto_array() {} + explicit pod_auto_array(const T* c) + { + memcpy(m_array, c, sizeof(T) * Size); + } + + const self_type& operator = (const T* c) + { + memcpy(m_array, c, sizeof(T) * Size); + return *this; + } + + static unsigned size() { return Size; } + const T& operator [] (unsigned i) const { return m_array[i]; } + T& operator [] (unsigned i) { return m_array[i]; } + const T& at(unsigned i) const { return m_array[i]; } + T& at(unsigned i) { return m_array[i]; } + T value_at(unsigned i) const { return m_array[i]; } + + private: + T m_array[Size]; + }; + + + 
//--------------------------------------------------------pod_auto_vector + template class pod_auto_vector + { + public: + typedef T value_type; + typedef pod_auto_vector self_type; + + pod_auto_vector() : m_size(0) {} + + void remove_all() { m_size = 0; } + void clear() { m_size = 0; } + void add(const T& v) { m_array[m_size++] = v; } + void push_back(const T& v) { m_array[m_size++] = v; } + void inc_size(unsigned size) { m_size += size; } + + unsigned size() const { return m_size; } + const T& operator [] (unsigned i) const { return m_array[i]; } + T& operator [] (unsigned i) { return m_array[i]; } + const T& at(unsigned i) const { return m_array[i]; } + T& at(unsigned i) { return m_array[i]; } + T value_at(unsigned i) const { return m_array[i]; } + + private: + T m_array[Size]; + unsigned m_size; + }; + + + //---------------------------------------------------------------pod_array + template class pod_array + { + public: + typedef T value_type; + typedef pod_array self_type; + + ~pod_array() { pod_allocator::deallocate(m_array, m_size); } + pod_array() : m_array(0), m_size(0) {} + + pod_array(unsigned size) : + m_array(pod_allocator::allocate(size)), + m_size(size) + {} + + pod_array(const self_type& v) : + m_array(pod_allocator::allocate(v.m_size)), + m_size(v.m_size) + { + memcpy(m_array, v.m_array, sizeof(T) * m_size); + } + + void resize(unsigned size) + { + if(size != m_size) + { + pod_allocator::deallocate(m_array, m_size); + m_array = pod_allocator::allocate(m_size = size); + } + } + const self_type& operator = (const self_type& v) + { + resize(v.size()); + memcpy(m_array, v.m_array, sizeof(T) * m_size); + return *this; + } + + unsigned size() const { return m_size; } + const T& operator [] (unsigned i) const { return m_array[i]; } + T& operator [] (unsigned i) { return m_array[i]; } + const T& at(unsigned i) const { return m_array[i]; } + T& at(unsigned i) { return m_array[i]; } + T value_at(unsigned i) const { return m_array[i]; } + + const T* data() const { return m_array; } + T* data() { return m_array; } + private: + T* m_array; + unsigned m_size; + }; + + + + //--------------------------------------------------------------pod_vector + // A simple class template to store Plain Old Data, a vector + // of a fixed size. The data is continous in memory + //------------------------------------------------------------------------ + template class pod_vector + { + public: + typedef T value_type; + + ~pod_vector() { pod_allocator::deallocate(m_array, m_capacity); } + pod_vector() : m_size(0), m_capacity(0), m_array(0) {} + pod_vector(unsigned cap, unsigned extra_tail=0); + + // Copying + pod_vector(const pod_vector&); + const pod_vector& operator = (const pod_vector&); + + // Set new capacity. All data is lost, size is set to zero. + void capacity(unsigned cap, unsigned extra_tail=0); + unsigned capacity() const { return m_capacity; } + + // Allocate n elements. All data is lost, + // but elements can be accessed in range 0...size-1. + void allocate(unsigned size, unsigned extra_tail=0); + + // Resize keeping the content. 
+ void resize(unsigned new_size); + + void zero() + { + memset(m_array, 0, sizeof(T) * m_size); + } + + void add(const T& v) { m_array[m_size++] = v; } + void push_back(const T& v) { m_array[m_size++] = v; } + void insert_at(unsigned pos, const T& val); + void inc_size(unsigned size) { m_size += size; } + unsigned size() const { return m_size; } + unsigned byte_size() const { return m_size * sizeof(T); } + void serialize(int8u* ptr) const; + void deserialize(const int8u* data, unsigned byte_size); + const T& operator [] (unsigned i) const { return m_array[i]; } + T& operator [] (unsigned i) { return m_array[i]; } + const T& at(unsigned i) const { return m_array[i]; } + T& at(unsigned i) { return m_array[i]; } + T value_at(unsigned i) const { return m_array[i]; } + + const T* data() const { return m_array; } + T* data() { return m_array; } + + void remove_all() { m_size = 0; } + void clear() { m_size = 0; } + void cut_at(unsigned num) { if(num < m_size) m_size = num; } + + private: + unsigned m_size; + unsigned m_capacity; + T* m_array; + }; + + //------------------------------------------------------------------------ + template + void pod_vector::capacity(unsigned cap, unsigned extra_tail) + { + m_size = 0; + if(cap > m_capacity) + { + pod_allocator::deallocate(m_array, m_capacity); + m_capacity = cap + extra_tail; + m_array = m_capacity ? pod_allocator::allocate(m_capacity) : 0; + } + } + + //------------------------------------------------------------------------ + template + void pod_vector::allocate(unsigned size, unsigned extra_tail) + { + capacity(size, extra_tail); + m_size = size; + } + + + //------------------------------------------------------------------------ + template + void pod_vector::resize(unsigned new_size) + { + if(new_size > m_size) + { + if(new_size > m_capacity) + { + T* data = pod_allocator::allocate(new_size); + memcpy(data, m_array, m_size * sizeof(T)); + pod_allocator::deallocate(m_array, m_capacity); + m_array = data; + } + } + else + { + m_size = new_size; + } + } + + //------------------------------------------------------------------------ + template pod_vector::pod_vector(unsigned cap, unsigned extra_tail) : + m_size(0), + m_capacity(cap + extra_tail), + m_array(pod_allocator::allocate(m_capacity)) {} + + //------------------------------------------------------------------------ + template pod_vector::pod_vector(const pod_vector& v) : + m_size(v.m_size), + m_capacity(v.m_capacity), + m_array(v.m_capacity ? 
pod_allocator::allocate(v.m_capacity) : 0) + { + memcpy(m_array, v.m_array, sizeof(T) * v.m_size); + } + + //------------------------------------------------------------------------ + template const pod_vector& + pod_vector::operator = (const pod_vector&v) + { + allocate(v.m_size); + if(v.m_size) memcpy(m_array, v.m_array, sizeof(T) * v.m_size); + return *this; + } + + //------------------------------------------------------------------------ + template void pod_vector::serialize(int8u* ptr) const + { + if(m_size) memcpy(ptr, m_array, m_size * sizeof(T)); + } + + //------------------------------------------------------------------------ + template + void pod_vector::deserialize(const int8u* data, unsigned byte_size) + { + byte_size /= sizeof(T); + allocate(byte_size); + if(byte_size) memcpy(m_array, data, byte_size * sizeof(T)); + } + + //------------------------------------------------------------------------ + template + void pod_vector::insert_at(unsigned pos, const T& val) + { + if(pos >= m_size) + { + m_array[m_size] = val; + } + else + { + memmove(m_array + pos + 1, m_array + pos, (m_size - pos) * sizeof(T)); + m_array[pos] = val; + } + ++m_size; + } + + //---------------------------------------------------------------pod_bvector + // A simple class template to store Plain Old Data, similar to std::deque + // It doesn't reallocate memory but instead, uses blocks of data of size + // of (1 << S), that is, power of two. The data is NOT contiguous in memory, + // so the only valid access method is operator [] or curr(), prev(), next() + // + // There reallocs occure only when the pool of pointers to blocks needs + // to be extended (it happens very rarely). You can control the value + // of increment to reallocate the pointer buffer. See the second constructor. + // By default, the incremeent value equals (1 << S), i.e., the block size. 
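+    //
+    // Illustrative usage (a sketch, assuming the default block shift S = 6,
+    // i.e. blocks of 1 << 6 = 64 elements):
+    //
+    //     pod_bvector<vertex_d, 6> verts;
+    //     verts.add(vertex_d(0.0, 0.0, path_cmd_move_to));
+    //     verts.add(vertex_d(1.0, 2.0, path_cmd_line_to));
+    //     double x = verts[1].x;             // access goes through operator []
+    //     const vertex_d& v = verts.last();  // or curr()/prev()/next()/last()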
+ //------------------------------------------------------------------------ + template class pod_bvector + { + public: + enum block_scale_e + { + block_shift = S, + block_size = 1 << block_shift, + block_mask = block_size - 1 + }; + + typedef T value_type; + + ~pod_bvector(); + pod_bvector(); + pod_bvector(unsigned block_ptr_inc); + + // Copying + pod_bvector(const pod_bvector& v); + const pod_bvector& operator = (const pod_bvector& v); + + void remove_all() { m_size = 0; } + void clear() { m_size = 0; } + void free_all() { free_tail(0); } + void free_tail(unsigned size); + void add(const T& val); + void push_back(const T& val) { add(val); } + void modify_last(const T& val); + void remove_last(); + + int allocate_continuous_block(unsigned num_elements); + + void add_array(const T* ptr, unsigned num_elem) + { + while(num_elem--) + { + add(*ptr++); + } + } + + template void add_data(DataAccessor& data) + { + while(data.size()) + { + add(*data); + ++data; + } + } + + void cut_at(unsigned size) + { + if(size < m_size) m_size = size; + } + + unsigned size() const { return m_size; } + + const T& operator [] (unsigned i) const + { + return m_blocks[i >> block_shift][i & block_mask]; + } + + T& operator [] (unsigned i) + { + return m_blocks[i >> block_shift][i & block_mask]; + } + + const T& at(unsigned i) const + { + return m_blocks[i >> block_shift][i & block_mask]; + } + + T& at(unsigned i) + { + return m_blocks[i >> block_shift][i & block_mask]; + } + + T value_at(unsigned i) const + { + return m_blocks[i >> block_shift][i & block_mask]; + } + + const T& curr(unsigned idx) const + { + return (*this)[idx]; + } + + T& curr(unsigned idx) + { + return (*this)[idx]; + } + + const T& prev(unsigned idx) const + { + return (*this)[(idx + m_size - 1) % m_size]; + } + + T& prev(unsigned idx) + { + return (*this)[(idx + m_size - 1) % m_size]; + } + + const T& next(unsigned idx) const + { + return (*this)[(idx + 1) % m_size]; + } + + T& next(unsigned idx) + { + return (*this)[(idx + 1) % m_size]; + } + + const T& last() const + { + return (*this)[m_size - 1]; + } + + T& last() + { + return (*this)[m_size - 1]; + } + + unsigned byte_size() const; + void serialize(int8u* ptr) const; + void deserialize(const int8u* data, unsigned byte_size); + void deserialize(unsigned start, const T& empty_val, + const int8u* data, unsigned byte_size); + + template + void deserialize(ByteAccessor data) + { + remove_all(); + unsigned elem_size = data.size() / sizeof(T); + + for(unsigned i = 0; i < elem_size; ++i) + { + int8u* ptr = (int8u*)data_ptr(); + for(unsigned j = 0; j < sizeof(T); ++j) + { + *ptr++ = *data; + ++data; + } + ++m_size; + } + } + + template + void deserialize(unsigned start, const T& empty_val, ByteAccessor data) + { + while(m_size < start) + { + add(empty_val); + } + + unsigned elem_size = data.size() / sizeof(T); + for(unsigned i = 0; i < elem_size; ++i) + { + int8u* ptr; + if(start + i < m_size) + { + ptr = (int8u*)(&((*this)[start + i])); + } + else + { + ptr = (int8u*)data_ptr(); + ++m_size; + } + for(unsigned j = 0; j < sizeof(T); ++j) + { + *ptr++ = *data; + ++data; + } + } + } + + const T* block(unsigned nb) const { return m_blocks[nb]; } + + private: + void allocate_block(unsigned nb); + T* data_ptr(); + + unsigned m_size; + unsigned m_num_blocks; + unsigned m_max_blocks; + T** m_blocks; + unsigned m_block_ptr_inc; + }; + + + //------------------------------------------------------------------------ + template pod_bvector::~pod_bvector() + { + if(m_num_blocks) + { + T** blk = m_blocks + 
m_num_blocks - 1; + while(m_num_blocks--) + { + pod_allocator::deallocate(*blk, block_size); + --blk; + } + } + pod_allocator::deallocate(m_blocks, m_max_blocks); + } + + + //------------------------------------------------------------------------ + template + void pod_bvector::free_tail(unsigned size) + { + if(size < m_size) + { + unsigned nb = (size + block_mask) >> block_shift; + while(m_num_blocks > nb) + { + pod_allocator::deallocate(m_blocks[--m_num_blocks], block_size); + } + if(m_num_blocks == 0) + { + pod_allocator::deallocate(m_blocks, m_max_blocks); + m_blocks = 0; + m_max_blocks = 0; + } + m_size = size; + } + } + + + //------------------------------------------------------------------------ + template pod_bvector::pod_bvector() : + m_size(0), + m_num_blocks(0), + m_max_blocks(0), + m_blocks(0), + m_block_ptr_inc(block_size) + { + } + + + //------------------------------------------------------------------------ + template + pod_bvector::pod_bvector(unsigned block_ptr_inc) : + m_size(0), + m_num_blocks(0), + m_max_blocks(0), + m_blocks(0), + m_block_ptr_inc(block_ptr_inc) + { + } + + + //------------------------------------------------------------------------ + template + pod_bvector::pod_bvector(const pod_bvector& v) : + m_size(v.m_size), + m_num_blocks(v.m_num_blocks), + m_max_blocks(v.m_max_blocks), + m_blocks(v.m_max_blocks ? + pod_allocator::allocate(v.m_max_blocks) : + 0), + m_block_ptr_inc(v.m_block_ptr_inc) + { + unsigned i; + for(i = 0; i < v.m_num_blocks; ++i) + { + m_blocks[i] = pod_allocator::allocate(block_size); + memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T)); + } + } + + + //------------------------------------------------------------------------ + template + const pod_bvector& + pod_bvector::operator = (const pod_bvector& v) + { + unsigned i; + for(i = m_num_blocks; i < v.m_num_blocks; ++i) + { + allocate_block(i); + } + for(i = 0; i < v.m_num_blocks; ++i) + { + memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T)); + } + m_size = v.m_size; + return *this; + } + + + //------------------------------------------------------------------------ + template + void pod_bvector::allocate_block(unsigned nb) + { + if(nb >= m_max_blocks) + { + T** new_blocks = pod_allocator::allocate(m_max_blocks + m_block_ptr_inc); + + if(m_blocks) + { + memcpy(new_blocks, + m_blocks, + m_num_blocks * sizeof(T*)); + + pod_allocator::deallocate(m_blocks, m_max_blocks); + } + m_blocks = new_blocks; + m_max_blocks += m_block_ptr_inc; + } + m_blocks[nb] = pod_allocator::allocate(block_size); + m_num_blocks++; + } + + + + //------------------------------------------------------------------------ + template + inline T* pod_bvector::data_ptr() + { + unsigned nb = m_size >> block_shift; + if(nb >= m_num_blocks) + { + allocate_block(nb); + } + return m_blocks[nb] + (m_size & block_mask); + } + + + + //------------------------------------------------------------------------ + template + inline void pod_bvector::add(const T& val) + { + *data_ptr() = val; + ++m_size; + } + + + //------------------------------------------------------------------------ + template + inline void pod_bvector::remove_last() + { + if(m_size) --m_size; + } + + + //------------------------------------------------------------------------ + template + void pod_bvector::modify_last(const T& val) + { + remove_last(); + add(val); + } + + + //------------------------------------------------------------------------ + template + int pod_bvector::allocate_continuous_block(unsigned num_elements) + { + if(num_elements < 
block_size) + { + data_ptr(); // Allocate initial block if necessary + unsigned rest = block_size - (m_size & block_mask); + unsigned index; + if(num_elements <= rest) + { + // The rest of the block is good, we can use it + //----------------- + index = m_size; + m_size += num_elements; + return index; + } + + // New block + //--------------- + m_size += rest; + data_ptr(); + index = m_size; + m_size += num_elements; + return index; + } + return -1; // Impossible to allocate + } + + + //------------------------------------------------------------------------ + template + unsigned pod_bvector::byte_size() const + { + return m_size * sizeof(T); + } + + + //------------------------------------------------------------------------ + template + void pod_bvector::serialize(int8u* ptr) const + { + unsigned i; + for(i = 0; i < m_size; i++) + { + memcpy(ptr, &(*this)[i], sizeof(T)); + ptr += sizeof(T); + } + } + + //------------------------------------------------------------------------ + template + void pod_bvector::deserialize(const int8u* data, unsigned byte_size) + { + remove_all(); + byte_size /= sizeof(T); + for(unsigned i = 0; i < byte_size; ++i) + { + T* ptr = data_ptr(); + memcpy(ptr, data, sizeof(T)); + ++m_size; + data += sizeof(T); + } + } + + + // Replace or add a number of elements starting from "start" position + //------------------------------------------------------------------------ + template + void pod_bvector::deserialize(unsigned start, const T& empty_val, + const int8u* data, unsigned byte_size) + { + while(m_size < start) + { + add(empty_val); + } + + byte_size /= sizeof(T); + for(unsigned i = 0; i < byte_size; ++i) + { + if(start + i < m_size) + { + memcpy(&((*this)[start + i]), data, sizeof(T)); + } + else + { + T* ptr = data_ptr(); + memcpy(ptr, data, sizeof(T)); + ++m_size; + } + data += sizeof(T); + } + } + + + //---------------------------------------------------------block_allocator + // Allocator for arbitrary POD data. Most usable in different cache + // systems for efficient memory allocations. + // Memory is allocated with blocks of fixed size ("block_size" in + // the constructor). If required size exceeds the block size the allocator + // creates a new block of the required size. However, the most efficient + // use is when the average reqired size is much less than the block size. 
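+    //
+    // Illustrative usage (a minimal sketch of the interface declared below):
+    //
+    //     block_allocator alloc(16384);       // pool grows in 16 KB blocks
+    //     int8u* p = alloc.allocate(64, 4);   // 64 bytes, aligned to 4 bytes
+    //     int8u* q = alloc.allocate(4000);    // served from the same block
+    //     alloc.remove_all();                 // releases every block at once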
+ //------------------------------------------------------------------------ + class block_allocator + { + struct block_type + { + int8u* data; + unsigned size; + }; + + public: + void remove_all() + { + if(m_num_blocks) + { + block_type* blk = m_blocks + m_num_blocks - 1; + while(m_num_blocks--) + { + pod_allocator::deallocate(blk->data, blk->size); + --blk; + } + pod_allocator::deallocate(m_blocks, m_max_blocks); + } + m_num_blocks = 0; + m_max_blocks = 0; + m_blocks = 0; + m_buf_ptr = 0; + m_rest = 0; + } + + ~block_allocator() + { + remove_all(); + } + + block_allocator(unsigned block_size, unsigned block_ptr_inc=256-8) : + m_block_size(block_size), + m_block_ptr_inc(block_ptr_inc), + m_num_blocks(0), + m_max_blocks(0), + m_blocks(0), + m_buf_ptr(0), + m_rest(0) + { + } + + + int8u* allocate(unsigned size, unsigned alignment=1) + { + if(size == 0) return 0; + if(size <= m_rest) + { + int8u* ptr = m_buf_ptr; + if(alignment > 1) + { + unsigned align = + (alignment - unsigned((size_t)ptr) % alignment) % alignment; + + size += align; + ptr += align; + if(size <= m_rest) + { + m_rest -= size; + m_buf_ptr += size; + return ptr; + } + allocate_block(size); + return allocate(size - align, alignment); + } + m_rest -= size; + m_buf_ptr += size; + return ptr; + } + allocate_block(size + alignment - 1); + return allocate(size, alignment); + } + + + private: + void allocate_block(unsigned size) + { + if(size < m_block_size) size = m_block_size; + if(m_num_blocks >= m_max_blocks) + { + block_type* new_blocks = + pod_allocator::allocate(m_max_blocks + m_block_ptr_inc); + + if(m_blocks) + { + memcpy(new_blocks, + m_blocks, + m_num_blocks * sizeof(block_type)); + pod_allocator::deallocate(m_blocks, m_max_blocks); + } + m_blocks = new_blocks; + m_max_blocks += m_block_ptr_inc; + } + + m_blocks[m_num_blocks].size = size; + m_blocks[m_num_blocks].data = + m_buf_ptr = + pod_allocator::allocate(size); + + m_num_blocks++; + m_rest = size; + } + + unsigned m_block_size; + unsigned m_block_ptr_inc; + unsigned m_num_blocks; + unsigned m_max_blocks; + block_type* m_blocks; + int8u* m_buf_ptr; + unsigned m_rest; + }; + + + + + + + + + //------------------------------------------------------------------------ + enum quick_sort_threshold_e + { + quick_sort_threshold = 9 + }; + + + //-----------------------------------------------------------swap_elements + template inline void swap_elements(T& a, T& b) + { + T temp = a; + a = b; + b = temp; + } + + + //--------------------------------------------------------------quick_sort + template + void quick_sort(Array& arr, Less less) + { + if(arr.size() < 2) return; + + typename Array::value_type* e1; + typename Array::value_type* e2; + + int stack[80]; + int* top = stack; + int limit = arr.size(); + int base = 0; + + for(;;) + { + int len = limit - base; + + int i; + int j; + int pivot; + + if(len > quick_sort_threshold) + { + // we use base + len/2 as the pivot + pivot = base + len / 2; + swap_elements(arr[base], arr[pivot]); + + i = base + 1; + j = limit - 1; + + // now ensure that *i <= *base <= *j + e1 = &(arr[j]); + e2 = &(arr[i]); + if(less(*e1, *e2)) swap_elements(*e1, *e2); + + e1 = &(arr[base]); + e2 = &(arr[i]); + if(less(*e1, *e2)) swap_elements(*e1, *e2); + + e1 = &(arr[j]); + e2 = &(arr[base]); + if(less(*e1, *e2)) swap_elements(*e1, *e2); + + for(;;) + { + do i++; while( less(arr[i], arr[base]) ); + do j--; while( less(arr[base], arr[j]) ); + + if( i > j ) + { + break; + } + + swap_elements(arr[i], arr[j]); + } + + swap_elements(arr[base], arr[j]); + + // 
now, push the largest sub-array + if(j - base > limit - i) + { + top[0] = base; + top[1] = j; + base = i; + } + else + { + top[0] = i; + top[1] = limit; + limit = j; + } + top += 2; + } + else + { + // the sub-array is small, perform insertion sort + j = base; + i = j + 1; + + for(; i < limit; j = i, i++) + { + for(; less(*(e1 = &(arr[j + 1])), *(e2 = &(arr[j]))); j--) + { + swap_elements(*e1, *e2); + if(j == base) + { + break; + } + } + } + if(top > stack) + { + top -= 2; + base = top[0]; + limit = top[1]; + } + else + { + break; + } + } + } + } + + + + + //------------------------------------------------------remove_duplicates + // Remove duplicates from a sorted array. It doesn't cut the + // tail of the array, it just returns the number of remaining elements. + //----------------------------------------------------------------------- + template + unsigned remove_duplicates(Array& arr, Equal equal) + { + if(arr.size() < 2) return arr.size(); + + unsigned i, j; + for(i = 1, j = 1; i < arr.size(); i++) + { + typename Array::value_type& e = arr[i]; + if(!equal(e, arr[i - 1])) + { + arr[j++] = e; + } + } + return j; + } + + //--------------------------------------------------------invert_container + template void invert_container(Array& arr) + { + int i = 0; + int j = arr.size() - 1; + while(i < j) + { + swap_elements(arr[i++], arr[j--]); + } + } + + //------------------------------------------------------binary_search_pos + template + unsigned binary_search_pos(const Array& arr, const Value& val, Less less) + { + if(arr.size() == 0) return 0; + + unsigned beg = 0; + unsigned end = arr.size() - 1; + + if(less(val, arr[0])) return 0; + if(less(arr[end], val)) return end + 1; + + while(end - beg > 1) + { + unsigned mid = (end + beg) >> 1; + if(less(val, arr[mid])) end = mid; + else beg = mid; + } + + //if(beg <= 0 && less(val, arr[0])) return 0; + //if(end >= arr.size() - 1 && less(arr[end], val)) ++end; + + return end; + } + + //----------------------------------------------------------range_adaptor + template class range_adaptor + { + public: + typedef typename Array::value_type value_type; + + range_adaptor(Array& array, unsigned start, unsigned size) : + m_array(array), m_start(start), m_size(size) + {} + + unsigned size() const { return m_size; } + const value_type& operator [] (unsigned i) const { return m_array[m_start + i]; } + value_type& operator [] (unsigned i) { return m_array[m_start + i]; } + const value_type& at(unsigned i) const { return m_array[m_start + i]; } + value_type& at(unsigned i) { return m_array[m_start + i]; } + value_type value_at(unsigned i) const { return m_array[m_start + i]; } + + private: + Array& m_array; + unsigned m_start; + unsigned m_size; + }; + + //---------------------------------------------------------------int_less + inline bool int_less(int a, int b) { return a < b; } + + //------------------------------------------------------------int_greater + inline bool int_greater(int a, int b) { return a > b; } + + //----------------------------------------------------------unsigned_less + inline bool unsigned_less(unsigned a, unsigned b) { return a < b; } + + //-------------------------------------------------------unsigned_greater + inline bool unsigned_greater(unsigned a, unsigned b) { return a > b; } +} + +#endif diff --git a/kiva/markers/agg/agg_basics.h b/kiva/markers/agg/agg_basics.h new file mode 100644 index 000000000..bc98e00b1 --- /dev/null +++ b/kiva/markers/agg/agg_basics.h @@ -0,0 +1,553 @@ 
+//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- + +#ifndef AGG_BASICS_INCLUDED +#define AGG_BASICS_INCLUDED + +#include +#include "agg_config.h" + +namespace agg24markers +{ + // The policy of all AGG containers and memory allocation strategy + // in general is that no allocated data requires explicit construction. + // It means that the allocator can be really simple; you can even + // replace new/delete to malloc/free. The constructors and destructors + // won't be called in this case, however everything will remain working. + // The second argument of deallocate() is the size of the allocated + // block. You can use this information if you wish. + //------------------------------------------------------------pod_allocator + template struct pod_allocator + { + static T* allocate(unsigned num) { return new T [num]; } + static void deallocate(T* ptr, unsigned) { delete [] ptr; } + }; + + // Single object allocator. It's also can be replaced with your custom + // allocator. The difference is that it can only allocate a single + // object and the constructor and destructor must be called. + // In AGG there is no need to allocate an array of objects with + // calling their constructors (only single ones). So that, if you + // replace these new/delete to malloc/free make sure that the in-place + // new is called and take care of calling the destructor too. + //------------------------------------------------------------obj_allocator + template struct obj_allocator + { + static T* allocate() { return new T; } + static void deallocate(T* ptr) { delete ptr; } + }; +} + + +//-------------------------------------------------------- Default basic types +// +// If the compiler has different capacity of the basic types you can redefine +// them via the compiler command line or by generating agg_config.h that is +// empty by default. +// +#ifndef AGG_INT8 +#define AGG_INT8 signed char +#endif + +#ifndef AGG_INT8U +#define AGG_INT8U unsigned char +#endif + +#ifndef AGG_INT16 +#define AGG_INT16 short +#endif + +#ifndef AGG_INT16U +#define AGG_INT16U unsigned short +#endif + +#ifndef AGG_INT32 +#define AGG_INT32 int +#endif + +#ifndef AGG_INT32U +#define AGG_INT32U unsigned +#endif + +#ifndef AGG_INT64 +#if defined(_MSC_VER) || defined(__BORLANDC__) +#define AGG_INT64 signed __int64 +#else +#define AGG_INT64 signed long long +#endif +#endif + +#ifndef AGG_INT64U +#if defined(_MSC_VER) || defined(__BORLANDC__) +#define AGG_INT64U unsigned __int64 +#else +#define AGG_INT64U unsigned long long +#endif +#endif + +//------------------------------------------------ Some fixes for MS Visual C++ +#if defined(_MSC_VER) +#pragma warning(disable:4786) // Identifier was truncated... 
+#endif + +#if defined(_MSC_VER) +#define AGG_INLINE __forceinline +#else +#define AGG_INLINE inline +#endif + +namespace agg24markers +{ + //------------------------------------------------------------------------- + typedef AGG_INT8 int8; //----int8 + typedef AGG_INT8U int8u; //----int8u + typedef AGG_INT16 int16; //----int16 + typedef AGG_INT16U int16u; //----int16u + typedef AGG_INT32 int32; //----int32 + typedef AGG_INT32U int32u; //----int32u + typedef AGG_INT64 int64; //----int64 + typedef AGG_INT64U int64u; //----int64u + +#if defined(AGG_FISTP) +#pragma warning(push) +#pragma warning(disable : 4035) //Disable warning "no return value" + AGG_INLINE int iround(double v) //-------iround + { + int t; + __asm fld qword ptr [v] + __asm fistp dword ptr [t] + __asm mov eax, dword ptr [t] + } + AGG_INLINE unsigned uround(double v) //-------uround + { + unsigned t; + __asm fld qword ptr [v] + __asm fistp dword ptr [t] + __asm mov eax, dword ptr [t] + } +#pragma warning(pop) + AGG_INLINE int ifloor(double v) + { + return int(floor(v)); + } + AGG_INLINE unsigned ufloor(double v) //-------ufloor + { + return unsigned(floor(v)); + } + AGG_INLINE int iceil(double v) + { + return int(ceil(v)); + } + AGG_INLINE unsigned uceil(double v) //--------uceil + { + return unsigned(ceil(v)); + } +#elif defined(AGG_QIFIST) + AGG_INLINE int iround(double v) + { + return int(v); + } + AGG_INLINE int uround(double v) + { + return unsigned(v); + } + AGG_INLINE int ifloor(double v) + { + return int(floor(v)); + } + AGG_INLINE unsigned ufloor(double v) + { + return unsigned(floor(v)); + } + AGG_INLINE int iceil(double v) + { + return int(ceil(v)); + } + AGG_INLINE unsigned uceil(double v) + { + return unsigned(ceil(v)); + } +#else + AGG_INLINE int iround(double v) + { + return int((v < 0.0) ? v - 0.5 : v + 0.5); + } + AGG_INLINE int uround(double v) + { + return unsigned(v + 0.5); + } + AGG_INLINE int ifloor(double v) + { + int i = int(v); + return i - (i > v); + } + AGG_INLINE unsigned ufloor(double v) + { + return unsigned(v); + } + AGG_INLINE int iceil(double v) + { + return int(ceil(v)); + } + AGG_INLINE unsigned uceil(double v) + { + return unsigned(ceil(v)); + } +#endif + + //---------------------------------------------------------------saturation + template struct saturation + { + AGG_INLINE static int iround(double v) + { + if(v < double(-Limit)) return -Limit; + if(v > double( Limit)) return Limit; + return agg24markers::iround(v); + } + }; + + //------------------------------------------------------------------mul_one + template struct mul_one + { + AGG_INLINE static unsigned mul(unsigned a, unsigned b) + { + register unsigned q = a * b + (1 << (Shift-1)); + return (q + (q >> Shift)) >> Shift; + } + }; + + //------------------------------------------------------------------------- + typedef unsigned char cover_type; //----cover_type + enum cover_scale_e + { + cover_shift = 8, //----cover_shift + cover_size = 1 << cover_shift, //----cover_size + cover_mask = cover_size - 1, //----cover_mask + cover_none = 0, //----cover_none + cover_full = cover_mask //----cover_full + }; + + //----------------------------------------------------poly_subpixel_scale_e + // These constants determine the subpixel accuracy, to be more precise, + // the number of bits of the fractional part of the coordinates. + // The possible coordinate capacity in bits can be calculated by formula: + // sizeof(int) * 8 - poly_subpixel_shift, i.e, for 32-bit integers and + // 8-bits fractional part the capacity is 24 bits. 
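+    //
+    // For example, with poly_subpixel_shift = 8 a coordinate is stored as a
+    // 24.8 fixed-point integer, i.e. 256 subpixel units per pixel, so a
+    // floating-point coordinate x would be converted roughly as
+    //
+    //     int fixed_x = iround(x * poly_subpixel_scale);
+    //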
+ enum poly_subpixel_scale_e + { + poly_subpixel_shift = 8, //----poly_subpixel_shift + poly_subpixel_scale = 1< struct rect_base + { + typedef T value_type; + typedef rect_base self_type; + T x1, y1, x2, y2; + + rect_base() {} + rect_base(T x1_, T y1_, T x2_, T y2_) : + x1(x1_), y1(y1_), x2(x2_), y2(y2_) {} + + void init(T x1_, T y1_, T x2_, T y2_) + { + x1 = x1_; y1 = y1_; x2 = x2_; y2 = y2_; + } + + const self_type& normalize() + { + T t; + if(x1 > x2) { t = x1; x1 = x2; x2 = t; } + if(y1 > y2) { t = y1; y1 = y2; y2 = t; } + return *this; + } + + bool clip(const self_type& r) + { + if(x2 > r.x2) x2 = r.x2; + if(y2 > r.y2) y2 = r.y2; + if(x1 < r.x1) x1 = r.x1; + if(y1 < r.y1) y1 = r.y1; + return x1 <= x2 && y1 <= y2; + } + + bool is_valid() const + { + return x1 <= x2 && y1 <= y2; + } + + bool hit_test(T x, T y) const + { + return (x >= x1 && x <= x2 && y >= y1 && y <= y2); + } + + bool overlaps(const self_type& r) const + { + return !(r.x1 > x2 || r.x2 < x1 + || r.y1 > y2 || r.y2 < y1); + } + }; + + //-----------------------------------------------------intersect_rectangles + template + inline Rect intersect_rectangles(const Rect& r1, const Rect& r2) + { + Rect r = r1; + + // First process x2,y2 because the other order + // results in Internal Compiler Error under + // Microsoft Visual C++ .NET 2003 69462-335-0000007-18038 in + // case of "Maximize Speed" optimization option. + //----------------- + if(r.x2 > r2.x2) r.x2 = r2.x2; + if(r.y2 > r2.y2) r.y2 = r2.y2; + if(r.x1 < r2.x1) r.x1 = r2.x1; + if(r.y1 < r2.y1) r.y1 = r2.y1; + return r; + } + + + //---------------------------------------------------------unite_rectangles + template + inline Rect unite_rectangles(const Rect& r1, const Rect& r2) + { + Rect r = r1; + if(r.x2 < r2.x2) r.x2 = r2.x2; + if(r.y2 < r2.y2) r.y2 = r2.y2; + if(r.x1 > r2.x1) r.x1 = r2.x1; + if(r.y1 > r2.y1) r.y1 = r2.y1; + return r; + } + + typedef rect_base rect_i; //----rect_i + typedef rect_base rect_f; //----rect_f + typedef rect_base rect_d; //----rect_d + + //---------------------------------------------------------path_commands_e + enum path_commands_e + { + path_cmd_stop = 0, //----path_cmd_stop + path_cmd_move_to = 1, //----path_cmd_move_to + path_cmd_line_to = 2, //----path_cmd_line_to + path_cmd_curve3 = 3, //----path_cmd_curve3 + path_cmd_curve4 = 4, //----path_cmd_curve4 + path_cmd_curveN = 5, //----path_cmd_curveN + path_cmd_catrom = 6, //----path_cmd_catrom + path_cmd_ubspline = 7, //----path_cmd_ubspline + path_cmd_end_poly = 0x0F, //----path_cmd_end_poly + path_cmd_mask = 0x0F //----path_cmd_mask + }; + + //------------------------------------------------------------path_flags_e + enum path_flags_e + { + path_flags_none = 0, //----path_flags_none + path_flags_ccw = 0x10, //----path_flags_ccw + path_flags_cw = 0x20, //----path_flags_cw + path_flags_close = 0x40, //----path_flags_close + path_flags_mask = 0xF0 //----path_flags_mask + }; + + //---------------------------------------------------------------is_vertex + inline bool is_vertex(unsigned c) + { + return c >= path_cmd_move_to && c < path_cmd_end_poly; + } + + //--------------------------------------------------------------is_drawing + inline bool is_drawing(unsigned c) + { + return c >= path_cmd_line_to && c < path_cmd_end_poly; + } + + //-----------------------------------------------------------------is_stop + inline bool is_stop(unsigned c) + { + return c == path_cmd_stop; + } + + //--------------------------------------------------------------is_move_to + inline bool is_move_to(unsigned 
c) + { + return c == path_cmd_move_to; + } + + //--------------------------------------------------------------is_line_to + inline bool is_line_to(unsigned c) + { + return c == path_cmd_line_to; + } + + //----------------------------------------------------------------is_curve + inline bool is_curve(unsigned c) + { + return c == path_cmd_curve3 || c == path_cmd_curve4; + } + + //---------------------------------------------------------------is_curve3 + inline bool is_curve3(unsigned c) + { + return c == path_cmd_curve3; + } + + //---------------------------------------------------------------is_curve4 + inline bool is_curve4(unsigned c) + { + return c == path_cmd_curve4; + } + + //-------------------------------------------------------------is_end_poly + inline bool is_end_poly(unsigned c) + { + return (c & path_cmd_mask) == path_cmd_end_poly; + } + + //----------------------------------------------------------------is_close + inline bool is_close(unsigned c) + { + return (c & ~(path_flags_cw | path_flags_ccw)) == + (path_cmd_end_poly | path_flags_close); + } + + //------------------------------------------------------------is_next_poly + inline bool is_next_poly(unsigned c) + { + return is_stop(c) || is_move_to(c) || is_end_poly(c); + } + + //-------------------------------------------------------------------is_cw + inline bool is_cw(unsigned c) + { + return (c & path_flags_cw) != 0; + } + + //------------------------------------------------------------------is_ccw + inline bool is_ccw(unsigned c) + { + return (c & path_flags_ccw) != 0; + } + + //-------------------------------------------------------------is_oriented + inline bool is_oriented(unsigned c) + { + return (c & (path_flags_cw | path_flags_ccw)) != 0; + } + + //---------------------------------------------------------------is_closed + inline bool is_closed(unsigned c) + { + return (c & path_flags_close) != 0; + } + + //----------------------------------------------------------get_close_flag + inline unsigned get_close_flag(unsigned c) + { + return c & path_flags_close; + } + + //-------------------------------------------------------clear_orientation + inline unsigned clear_orientation(unsigned c) + { + return c & ~(path_flags_cw | path_flags_ccw); + } + + //---------------------------------------------------------get_orientation + inline unsigned get_orientation(unsigned c) + { + return c & (path_flags_cw | path_flags_ccw); + } + + //---------------------------------------------------------set_orientation + inline unsigned set_orientation(unsigned c, unsigned o) + { + return clear_orientation(c) | o; + } + + //--------------------------------------------------------------point_base + template struct point_base + { + typedef T value_type; + T x,y; + point_base() {} + point_base(T x_, T y_) : x(x_), y(y_) {} + }; + typedef point_base point_i; //-----point_i + typedef point_base point_f; //-----point_f + typedef point_base point_d; //-----point_d + + //-------------------------------------------------------------vertex_base + template struct vertex_base + { + typedef T value_type; + T x,y; + unsigned cmd; + vertex_base() {} + vertex_base(T x_, T y_, unsigned cmd_) : x(x_), y(y_), cmd(cmd_) {} + }; + typedef vertex_base vertex_i; //-----vertex_i + typedef vertex_base vertex_f; //-----vertex_f + typedef vertex_base vertex_d; //-----vertex_d + + //----------------------------------------------------------------row_info + template struct row_info + { + int x1, x2; + T* ptr; + row_info() {} + row_info(int x1_, int x2_, T* ptr_) : 
x1(x1_), x2(x2_), ptr(ptr_) {} + }; + + //----------------------------------------------------------const_row_info + template struct const_row_info + { + int x1, x2; + const T* ptr; + const_row_info() {} + const_row_info(int x1_, int x2_, const T* ptr_) : + x1(x1_), x2(x2_), ptr(ptr_) {} + }; + + //------------------------------------------------------------is_equal_eps + template inline bool is_equal_eps(T v1, T v2, T epsilon) + { + return fabs(v1 - v2) <= double(epsilon); + } +} + +#endif diff --git a/kiva/markers/agg/agg_color_gray.h b/kiva/markers/agg/agg_color_gray.h new file mode 100644 index 000000000..35b4857a2 --- /dev/null +++ b/kiva/markers/agg/agg_color_gray.h @@ -0,0 +1,1047 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// Adaptation for high precision colors has been sponsored by +// Liberty Technology Systems, Inc., visit http://lib-sys.com +// +// Liberty Technology Systems, Inc. is the provider of +// PostScript and PDF technology for software developers. +// +//---------------------------------------------------------------------------- +// +// color types gray8, gray16 +// +//---------------------------------------------------------------------------- + +#ifndef AGG_COLOR_GRAY_INCLUDED +#define AGG_COLOR_GRAY_INCLUDED + +#include "agg_basics.h" +#include "agg_color_rgba.h" + +namespace agg24markers +{ + + //===================================================================gray8 + template + struct gray8T + { + typedef int8u value_type; + typedef int32u calc_type; + typedef int32 long_type; + enum base_scale_e + { + base_shift = 8, + base_scale = 1 << base_shift, + base_mask = base_scale - 1, + base_MSB = 1 << (base_shift - 1) + }; + typedef gray8T self_type; + + value_type v; + value_type a; + + static value_type luminance(const rgba& c) + { + // Calculate grayscale value as per ITU-R BT.709. + return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask)); + } + + static value_type luminance(const rgba8& c) + { + // Calculate grayscale value as per ITU-R BT.709. + return value_type((55u * c.r + 184u * c.g + 18u * c.b) >> 8); + } + + static void convert(gray8T& dst, const gray8T& src) + { + dst.v = sRGB_conv::rgb_from_sRGB(src.v); + dst.a = src.a; + } + + static void convert(gray8T& dst, const gray8T& src) + { + dst.v = sRGB_conv::rgb_to_sRGB(src.v); + dst.a = src.a; + } + + static void convert(gray8T& dst, const rgba8& src) + { + dst.v = luminance(src); + dst.a = src.a; + } + + static void convert(gray8T& dst, const srgba8& src) + { + // The RGB weights are only valid for linear values. + convert(dst, rgba8(src)); + } + + static void convert(gray8T& dst, const rgba8& src) + { + dst.v = sRGB_conv::rgb_to_sRGB(luminance(src)); + dst.a = src.a; + } + + static void convert(gray8T& dst, const srgba8& src) + { + // The RGB weights are only valid for linear values. 
+ convert(dst, rgba8(src)); + } + + //-------------------------------------------------------------------- + gray8T() {} + + //-------------------------------------------------------------------- + explicit gray8T(unsigned v_, unsigned a_ = base_mask) : + v(int8u(v_)), a(int8u(a_)) {} + + //-------------------------------------------------------------------- + gray8T(const self_type& c, unsigned a_) : + v(c.v), a(value_type(a_)) {} + + //-------------------------------------------------------------------- + gray8T(const rgba& c) : + v(luminance(c)), + a(value_type(uround(c.a * base_mask))) {} + + //-------------------------------------------------------------------- + template + gray8T(const gray8T& c) + { + convert(*this, c); + } + + //-------------------------------------------------------------------- + template + gray8T(const rgba8T& c) + { + convert(*this, c); + } + + //-------------------------------------------------------------------- + template + T convert_from_sRGB() const + { + typename T::value_type y = sRGB_conv::rgb_from_sRGB(v); + return T(y, y, y, sRGB_conv::alpha_from_sRGB(a)); + } + + template + T convert_to_sRGB() const + { + typename T::value_type y = sRGB_conv::rgb_to_sRGB(v); + return T(y, y, y, sRGB_conv::alpha_to_sRGB(a)); + } + + //-------------------------------------------------------------------- + rgba8 make_rgba8(const linear&) const + { + return rgba8(v, v, v, a); + } + + rgba8 make_rgba8(const sRGB&) const + { + return convert_from_sRGB(); + } + + operator rgba8() const + { + return make_rgba8(Colorspace()); + } + + //-------------------------------------------------------------------- + srgba8 make_srgba8(const linear&) const + { + return convert_to_sRGB(); + } + + srgba8 make_srgba8(const sRGB&) const + { + return srgba8(v, v, v, a); + } + + operator srgba8() const + { + return make_rgba8(Colorspace()); + } + + //-------------------------------------------------------------------- + rgba16 make_rgba16(const linear&) const + { + rgba16::value_type rgb = (v << 8) | v; + return rgba16(rgb, rgb, rgb, (a << 8) | a); + } + + rgba16 make_rgba16(const sRGB&) const + { + return convert_from_sRGB(); + } + + operator rgba16() const + { + return make_rgba16(Colorspace()); + } + + //-------------------------------------------------------------------- + rgba32 make_rgba32(const linear&) const + { + rgba32::value_type v32 = v / 255.0f; + return rgba32(v32, v32, v32, a / 255.0f); + } + + rgba32 make_rgba32(const sRGB&) const + { + return convert_from_sRGB(); + } + + operator rgba32() const + { + return make_rgba32(Colorspace()); + } + + //-------------------------------------------------------------------- + static AGG_INLINE double to_double(value_type a) + { + return double(a) / base_mask; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type from_double(double a) + { + return value_type(uround(a * base_mask)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type empty_value() + { + return 0; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type full_value() + { + return base_mask; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_transparent() const + { + return a == 0; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_opaque() const + { + return a == base_mask; + } + + 
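The make_rgba16 conversion above widens each 8-bit gray component with (v << 8) | v instead of a multiply. A minimal standalone sketch (illustrative only, not part of this patch) checking that this byte replication is an exact rescale of the 8-bit range onto the 16-bit range:

    #include <cstdio>

    int main()
    {
        // (v << 8) | v replicates the byte, which equals v * 257.
        // Since 65535 == 255 * 257, this is exactly v * 65535 / 255.
        for (unsigned v = 0; v < 256; ++v)
        {
            unsigned replicated = (v << 8) | v;
            unsigned rescaled = v * 65535u / 255u;  // exact, no remainder
            if (replicated != rescaled)
            {
                std::printf("mismatch at v = %u\n", v);
                return 1;
            }
        }
        std::printf("byte replication matches v * 65535 / 255 for every 8-bit v\n");
        return 0;
    }
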
//-------------------------------------------------------------------- + // Fixed-point multiply, exact over int8u. + static AGG_INLINE value_type multiply(value_type a, value_type b) + { + calc_type t = a * b + base_MSB; + return value_type(((t >> base_shift) + t) >> base_shift); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type demultiply(value_type a, value_type b) + { + if (a * b == 0) + { + return 0; + } + else if (a >= b) + { + return base_mask; + } + else return value_type((a * base_mask + (b >> 1)) / b); + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downscale(T a) + { + return a >> base_shift; + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downshift(T a, unsigned n) + { + return a >> n; + } + + //-------------------------------------------------------------------- + // Fixed-point multiply, exact over int8u. + // Specifically for multiplying a color component by a cover. + static AGG_INLINE value_type mult_cover(value_type a, value_type b) + { + return multiply(a, b); + } + + //-------------------------------------------------------------------- + static AGG_INLINE cover_type scale_cover(cover_type a, value_type b) + { + return multiply(b, a); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a, assuming q is premultiplied by a. + static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a) + { + return p + q - multiply(p, a); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a. + static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a) + { + int t = (q - p) * a + base_MSB - (p > q); + return value_type(p + (((t >> base_shift) + t) >> base_shift)); + } + + //-------------------------------------------------------------------- + self_type& clear() + { + v = a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& transparent() + { + a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& opacity(double a_) + { + if (a_ < 0) a = 0; + else if (a_ > 1) a = 1; + else a = (value_type)uround(a_ * double(base_mask)); + return *this; + } + + //-------------------------------------------------------------------- + double opacity() const + { + return double(a) / double(base_mask); + } + + //-------------------------------------------------------------------- + self_type& premultiply() + { + if (a < base_mask) + { + if (a == 0) v = 0; + else v = multiply(v, a); + } + return *this; + } + + //-------------------------------------------------------------------- + self_type& demultiply() + { + if (a < base_mask) + { + if (a == 0) + { + v = 0; + } + else + { + calc_type v_ = (calc_type(v) * base_mask) / a; + v = value_type((v_ > base_mask) ? 
(value_type)base_mask : v_); + } + } + return *this; + } + + //-------------------------------------------------------------------- + self_type gradient(self_type c, double k) const + { + self_type ret; + calc_type ik = uround(k * base_scale); + ret.v = lerp(v, c.v, ik); + ret.a = lerp(a, c.a, ik); + return ret; + } + + //-------------------------------------------------------------------- + AGG_INLINE void add(const self_type& c, unsigned cover) + { + calc_type cv, ca; + if (cover == cover_mask) + { + if (c.a == base_mask) + { + *this = c; + return; + } + else + { + cv = v + c.v; + ca = a + c.a; + } + } + else + { + cv = v + mult_cover(c.v, cover); + ca = a + mult_cover(c.a, cover); + } + v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv); + a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca); + } + + //-------------------------------------------------------------------- + static self_type no_color() { return self_type(0,0); } + }; + + typedef gray8T gray8; + typedef gray8T sgray8; + + + //==================================================================gray16 + struct gray16 + { + typedef int16u value_type; + typedef int32u calc_type; + typedef int64 long_type; + enum base_scale_e + { + base_shift = 16, + base_scale = 1 << base_shift, + base_mask = base_scale - 1, + base_MSB = 1 << (base_shift - 1) + }; + typedef gray16 self_type; + + value_type v; + value_type a; + + static value_type luminance(const rgba& c) + { + // Calculate grayscale value as per ITU-R BT.709. + return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask)); + } + + static value_type luminance(const rgba16& c) + { + // Calculate grayscale value as per ITU-R BT.709. + return value_type((13933u * c.r + 46872u * c.g + 4732u * c.b) >> 16); + } + + static value_type luminance(const rgba8& c) + { + return luminance(rgba16(c)); + } + + static value_type luminance(const srgba8& c) + { + return luminance(rgba16(c)); + } + + static value_type luminance(const rgba32& c) + { + return luminance(rgba(c)); + } + + //-------------------------------------------------------------------- + gray16() {} + + //-------------------------------------------------------------------- + explicit gray16(unsigned v_, unsigned a_ = base_mask) : + v(int16u(v_)), a(int16u(a_)) {} + + //-------------------------------------------------------------------- + gray16(const self_type& c, unsigned a_) : + v(c.v), a(value_type(a_)) {} + + //-------------------------------------------------------------------- + gray16(const rgba& c) : + v(luminance(c)), + a((value_type)uround(c.a * double(base_mask))) {} + + //-------------------------------------------------------------------- + gray16(const rgba8& c) : + v(luminance(c)), + a((value_type(c.a) << 8) | c.a) {} + + //-------------------------------------------------------------------- + gray16(const srgba8& c) : + v(luminance(c)), + a((value_type(c.a) << 8) | c.a) {} + + //-------------------------------------------------------------------- + gray16(const rgba16& c) : + v(luminance(c)), + a(c.a) {} + + //-------------------------------------------------------------------- + gray16(const gray8& c) : + v((value_type(c.v) << 8) | c.v), + a((value_type(c.a) << 8) | c.a) {} + + //-------------------------------------------------------------------- + gray16(const sgray8& c) : + v(sRGB_conv::rgb_from_sRGB(c.v)), + a(sRGB_conv::alpha_from_sRGB(c.a)) {} + + //-------------------------------------------------------------------- + operator rgba8() const + 
{ + return rgba8(v >> 8, v >> 8, v >> 8, a >> 8); + } + + //-------------------------------------------------------------------- + operator srgba8() const + { + value_type y = sRGB_conv::rgb_to_sRGB(v); + return srgba8(y, y, y, sRGB_conv::alpha_to_sRGB(a)); + } + + //-------------------------------------------------------------------- + operator rgba16() const + { + return rgba16(v, v, v, a); + } + + //-------------------------------------------------------------------- + operator rgba32() const + { + rgba32::value_type v32 = v / 65535.0f; + return rgba32(v32, v32, v32, a / 65535.0f); + } + + //-------------------------------------------------------------------- + operator gray8() const + { + return gray8(v >> 8, a >> 8); + } + + //-------------------------------------------------------------------- + operator sgray8() const + { + return sgray8( + sRGB_conv::rgb_to_sRGB(v), + sRGB_conv::alpha_to_sRGB(a)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE double to_double(value_type a) + { + return double(a) / base_mask; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type from_double(double a) + { + return value_type(uround(a * base_mask)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type empty_value() + { + return 0; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type full_value() + { + return base_mask; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_transparent() const + { + return a == 0; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_opaque() const + { + return a == base_mask; + } + + //-------------------------------------------------------------------- + // Fixed-point multiply, exact over int16u. + static AGG_INLINE value_type multiply(value_type a, value_type b) + { + calc_type t = a * b + base_MSB; + return value_type(((t >> base_shift) + t) >> base_shift); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type demultiply(value_type a, value_type b) + { + if (a * b == 0) + { + return 0; + } + else if (a >= b) + { + return base_mask; + } + else return value_type((a * base_mask + (b >> 1)) / b); + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downscale(T a) + { + return a >> base_shift; + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downshift(T a, unsigned n) + { + return a >> n; + } + + //-------------------------------------------------------------------- + // Fixed-point multiply, almost exact over int16u. + // Specifically for multiplying a color component by a cover. + static AGG_INLINE value_type mult_cover(value_type a, cover_type b) + { + return multiply(a, b << 8 | b); + } + + //-------------------------------------------------------------------- + static AGG_INLINE cover_type scale_cover(cover_type a, value_type b) + { + return mult_cover(b, a) >> 8; + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a, assuming q is premultiplied by a. 
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a) + { + return p + q - multiply(p, a); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a. + static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a) + { + int t = (q - p) * a + base_MSB - (p > q); + return value_type(p + (((t >> base_shift) + t) >> base_shift)); + } + + //-------------------------------------------------------------------- + self_type& clear() + { + v = a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& transparent() + { + a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& opacity(double a_) + { + if (a_ < 0) a = 0; + else if(a_ > 1) a = 1; + else a = (value_type)uround(a_ * double(base_mask)); + return *this; + } + + //-------------------------------------------------------------------- + double opacity() const + { + return double(a) / double(base_mask); + } + + + //-------------------------------------------------------------------- + self_type& premultiply() + { + if (a < base_mask) + { + if(a == 0) v = 0; + else v = multiply(v, a); + } + return *this; + } + + //-------------------------------------------------------------------- + self_type& demultiply() + { + if (a < base_mask) + { + if (a == 0) + { + v = 0; + } + else + { + calc_type v_ = (calc_type(v) * base_mask) / a; + v = value_type((v_ > base_mask) ? base_mask : v_); + } + } + return *this; + } + + //-------------------------------------------------------------------- + self_type gradient(self_type c, double k) const + { + self_type ret; + calc_type ik = uround(k * base_scale); + ret.v = lerp(v, c.v, ik); + ret.a = lerp(a, c.a, ik); + return ret; + } + + //-------------------------------------------------------------------- + AGG_INLINE void add(const self_type& c, unsigned cover) + { + calc_type cv, ca; + if (cover == cover_mask) + { + if (c.a == base_mask) + { + *this = c; + return; + } + else + { + cv = v + c.v; + ca = a + c.a; + } + } + else + { + cv = v + mult_cover(c.v, cover); + ca = a + mult_cover(c.a, cover); + } + v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv); + a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca); + } + + //-------------------------------------------------------------------- + static self_type no_color() { return self_type(0,0); } + }; + + + //===================================================================gray32 + struct gray32 + { + typedef float value_type; + typedef double calc_type; + typedef double long_type; + typedef gray32 self_type; + + value_type v; + value_type a; + + // Calculate grayscale value as per ITU-R BT.709. 
+ static value_type luminance(double r, double g, double b) + { + return value_type(0.2126 * r + 0.7152 * g + 0.0722 * b); + } + + static value_type luminance(const rgba& c) + { + return luminance(c.r, c.g, c.b); + } + + static value_type luminance(const rgba32& c) + { + return luminance(c.r, c.g, c.b); + } + + static value_type luminance(const rgba8& c) + { + return luminance(c.r / 255.0, c.g / 255.0, c.g / 255.0); + } + + static value_type luminance(const rgba16& c) + { + return luminance(c.r / 65535.0, c.g / 65535.0, c.g / 65535.0); + } + + //-------------------------------------------------------------------- + gray32() {} + + //-------------------------------------------------------------------- + explicit gray32(value_type v_, value_type a_ = 1) : + v(v_), a(a_) {} + + //-------------------------------------------------------------------- + gray32(const self_type& c, value_type a_) : + v(c.v), a(a_) {} + + //-------------------------------------------------------------------- + gray32(const rgba& c) : + v(luminance(c)), + a(value_type(c.a)) {} + + //-------------------------------------------------------------------- + gray32(const rgba8& c) : + v(luminance(c)), + a(value_type(c.a / 255.0)) {} + + //-------------------------------------------------------------------- + gray32(const srgba8& c) : + v(luminance(rgba32(c))), + a(value_type(c.a / 255.0)) {} + + //-------------------------------------------------------------------- + gray32(const rgba16& c) : + v(luminance(c)), + a(value_type(c.a / 65535.0)) {} + + //-------------------------------------------------------------------- + gray32(const rgba32& c) : + v(luminance(c)), + a(value_type(c.a)) {} + + //-------------------------------------------------------------------- + gray32(const gray8& c) : + v(value_type(c.v / 255.0)), + a(value_type(c.a / 255.0)) {} + + //-------------------------------------------------------------------- + gray32(const sgray8& c) : + v(sRGB_conv::rgb_from_sRGB(c.v)), + a(sRGB_conv::alpha_from_sRGB(c.a)) {} + + //-------------------------------------------------------------------- + gray32(const gray16& c) : + v(value_type(c.v / 65535.0)), + a(value_type(c.a / 65535.0)) {} + + //-------------------------------------------------------------------- + operator rgba() const + { + return rgba(v, v, v, a); + } + + //-------------------------------------------------------------------- + operator gray8() const + { + return gray8(uround(v * 255.0), uround(a * 255.0)); + } + + //-------------------------------------------------------------------- + operator sgray8() const + { + // Return (non-premultiplied) sRGB values. 
+ return sgray8( + sRGB_conv::rgb_to_sRGB(v), + sRGB_conv::alpha_to_sRGB(a)); + } + + //-------------------------------------------------------------------- + operator gray16() const + { + return gray16(uround(v * 65535.0), uround(a * 65535.0)); + } + + //-------------------------------------------------------------------- + operator rgba8() const + { + rgba8::value_type y = uround(v * 255.0); + return rgba8(y, y, y, uround(a * 255.0)); + } + + //-------------------------------------------------------------------- + operator srgba8() const + { + srgba8::value_type y = sRGB_conv::rgb_to_sRGB(v); + return srgba8(y, y, y, sRGB_conv::alpha_to_sRGB(a)); + } + + //-------------------------------------------------------------------- + operator rgba16() const + { + rgba16::value_type y = uround(v * 65535.0); + return rgba16(y, y, y, uround(a * 65535.0)); + } + + //-------------------------------------------------------------------- + operator rgba32() const + { + return rgba32(v, v, v, a); + } + + //-------------------------------------------------------------------- + static AGG_INLINE double to_double(value_type a) + { + return a; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type from_double(double a) + { + return value_type(a); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type empty_value() + { + return 0; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type full_value() + { + return 1; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_transparent() const + { + return a <= 0; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_opaque() const + { + return a >= 1; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type invert(value_type x) + { + return 1 - x; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type multiply(value_type a, value_type b) + { + return value_type(a * b); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type demultiply(value_type a, value_type b) + { + return (b == 0) ? 0 : value_type(a / b); + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downscale(T a) + { + return a; + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downshift(T a, unsigned n) + { + return n > 0 ? a / (1 << n) : a; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type mult_cover(value_type a, cover_type b) + { + return value_type(a * b / cover_mask); + } + + //-------------------------------------------------------------------- + static AGG_INLINE cover_type scale_cover(cover_type a, value_type b) + { + return cover_type(uround(a * b)); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a, assuming q is premultiplied by a. + static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a) + { + return (1 - a) * p + q; // more accurate than "p + q - p * a" + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a. 
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a) + { + // The form "p + a * (q - p)" avoids a multiplication, but may produce an + // inaccurate result. For example, "p + (q - p)" may not be exactly equal + // to q. Therefore, stick to the basic expression, which at least produces + // the correct result at either extreme. + return (1 - a) * p + a * q; + } + + //-------------------------------------------------------------------- + self_type& clear() + { + v = a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& transparent() + { + a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& opacity(double a_) + { + if (a_ < 0) a = 0; + else if (a_ > 1) a = 1; + else a = value_type(a_); + return *this; + } + + //-------------------------------------------------------------------- + double opacity() const + { + return a; + } + + + //-------------------------------------------------------------------- + self_type& premultiply() + { + if (a < 0) v = 0; + else if(a < 1) v *= a; + return *this; + } + + //-------------------------------------------------------------------- + self_type& demultiply() + { + if (a < 0) v = 0; + else if (a < 1) v /= a; + return *this; + } + + //-------------------------------------------------------------------- + self_type gradient(self_type c, double k) const + { + return self_type( + value_type(v + (c.v - v) * k), + value_type(a + (c.a - a) * k)); + } + + //-------------------------------------------------------------------- + static self_type no_color() { return self_type(0,0); } + }; +} + + + + +#endif diff --git a/kiva/markers/agg/agg_color_rgba.h b/kiva/markers/agg/agg_color_rgba.h new file mode 100644 index 000000000..873083f5f --- /dev/null +++ b/kiva/markers/agg/agg_color_rgba.h @@ -0,0 +1,1353 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// +// Adaptation for high precision colors has been sponsored by +// Liberty Technology Systems, Inc., visit http://lib-sys.com +// +// Liberty Technology Systems, Inc. is the provider of +// PostScript and PDF technology for software developers. 
+// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- + +#ifndef AGG_COLOR_RGBA_INCLUDED +#define AGG_COLOR_RGBA_INCLUDED + +#include +#include "agg_basics.h" +#include "agg_gamma_lut.h" + +namespace agg24markers +{ + // Supported component orders for RGB and RGBA pixel formats + //======================================================================= + struct order_rgb { enum rgb_e { R=0, G=1, B=2, N=3 }; }; + struct order_bgr { enum bgr_e { B=0, G=1, R=2, N=3 }; }; + struct order_rgba { enum rgba_e { R=0, G=1, B=2, A=3, N=4 }; }; + struct order_argb { enum argb_e { A=0, R=1, G=2, B=3, N=4 }; }; + struct order_abgr { enum abgr_e { A=0, B=1, G=2, R=3, N=4 }; }; + struct order_bgra { enum bgra_e { B=0, G=1, R=2, A=3, N=4 }; }; + + // Colorspace tag types. + struct linear {}; + struct sRGB {}; + + //====================================================================rgba + struct rgba + { + typedef double value_type; + + double r; + double g; + double b; + double a; + + //-------------------------------------------------------------------- + rgba() {} + + //-------------------------------------------------------------------- + rgba(double r_, double g_, double b_, double a_=1.0) : + r(r_), g(g_), b(b_), a(a_) {} + + //-------------------------------------------------------------------- + rgba(const rgba& c, double a_) : r(c.r), g(c.g), b(c.b), a(a_) {} + + //-------------------------------------------------------------------- + rgba& clear() + { + r = g = b = a = 0; + return *this; + } + + //-------------------------------------------------------------------- + rgba& transparent() + { + a = 0; + return *this; + } + + //-------------------------------------------------------------------- + rgba& opacity(double a_) + { + if (a_ < 0) a = 0; + else if (a_ > 1) a = 1; + else a = a_; + return *this; + } + + //-------------------------------------------------------------------- + double opacity() const + { + return a; + } + + //-------------------------------------------------------------------- + rgba& premultiply() + { + r *= a; + g *= a; + b *= a; + return *this; + } + + //-------------------------------------------------------------------- + rgba& premultiply(double a_) + { + if (a <= 0 || a_ <= 0) + { + r = g = b = a = 0; + } + else + { + a_ /= a; + r *= a_; + g *= a_; + b *= a_; + a = a_; + } + return *this; + } + + //-------------------------------------------------------------------- + rgba& demultiply() + { + if (a == 0) + { + r = g = b = 0; + } + else + { + double a_ = 1.0 / a; + r *= a_; + g *= a_; + b *= a_; + } + return *this; + } + + + //-------------------------------------------------------------------- + rgba gradient(rgba c, double k) const + { + rgba ret; + ret.r = r + (c.r - r) * k; + ret.g = g + (c.g - g) * k; + ret.b = b + (c.b - b) * k; + ret.a = a + (c.a - a) * k; + return ret; + } + + rgba& operator+=(const rgba& c) + { + r += c.r; + g += c.g; + b += c.b; + a += c.a; + return *this; + } + + rgba& operator*=(double k) + { + r *= k; + g *= k; + b *= k; + a *= k; + return *this; + } + + //-------------------------------------------------------------------- + static rgba no_color() { return rgba(0,0,0,0); } + + //-------------------------------------------------------------------- + static rgba from_wavelength(double wl, double gamma = 1.0); + + 
//-------------------------------------------------------------------- + explicit rgba(double wavelen, double gamma=1.0) + { + *this = from_wavelength(wavelen, gamma); + } + + }; + + inline rgba operator+(const rgba& a, const rgba& b) + { + return rgba(a) += b; + } + + inline rgba operator*(const rgba& a, double b) + { + return rgba(a) *= b; + } + + //------------------------------------------------------------------------ + inline rgba rgba::from_wavelength(double wl, double gamma) + { + rgba t(0.0, 0.0, 0.0); + + if (wl >= 380.0 && wl <= 440.0) + { + t.r = -1.0 * (wl - 440.0) / (440.0 - 380.0); + t.b = 1.0; + } + else if (wl >= 440.0 && wl <= 490.0) + { + t.g = (wl - 440.0) / (490.0 - 440.0); + t.b = 1.0; + } + else if (wl >= 490.0 && wl <= 510.0) + { + t.g = 1.0; + t.b = -1.0 * (wl - 510.0) / (510.0 - 490.0); + } + else if (wl >= 510.0 && wl <= 580.0) + { + t.r = (wl - 510.0) / (580.0 - 510.0); + t.g = 1.0; + } + else if (wl >= 580.0 && wl <= 645.0) + { + t.r = 1.0; + t.g = -1.0 * (wl - 645.0) / (645.0 - 580.0); + } + else if (wl >= 645.0 && wl <= 780.0) + { + t.r = 1.0; + } + + double s = 1.0; + if (wl > 700.0) s = 0.3 + 0.7 * (780.0 - wl) / (780.0 - 700.0); + else if (wl < 420.0) s = 0.3 + 0.7 * (wl - 380.0) / (420.0 - 380.0); + + t.r = pow(t.r * s, gamma); + t.g = pow(t.g * s, gamma); + t.b = pow(t.b * s, gamma); + return t; + } + + inline rgba rgba_pre(double r, double g, double b, double a) + { + return rgba(r, g, b, a).premultiply(); + } + + + //===================================================================rgba8 + template + struct rgba8T + { + typedef int8u value_type; + typedef int32u calc_type; + typedef int32 long_type; + enum base_scale_e + { + base_shift = 8, + base_scale = 1 << base_shift, + base_mask = base_scale - 1, + base_MSB = 1 << (base_shift - 1) + }; + typedef rgba8T self_type; + + + value_type r; + value_type g; + value_type b; + value_type a; + + static void convert(rgba8T& dst, const rgba8T& src) + { + dst.r = sRGB_conv::rgb_from_sRGB(src.r); + dst.g = sRGB_conv::rgb_from_sRGB(src.g); + dst.b = sRGB_conv::rgb_from_sRGB(src.b); + dst.a = src.a; + } + + static void convert(rgba8T& dst, const rgba8T& src) + { + dst.r = sRGB_conv::rgb_to_sRGB(src.r); + dst.g = sRGB_conv::rgb_to_sRGB(src.g); + dst.b = sRGB_conv::rgb_to_sRGB(src.b); + dst.a = src.a; + } + + static void convert(rgba8T& dst, const rgba& src) + { + dst.r = value_type(uround(src.r * base_mask)); + dst.g = value_type(uround(src.g * base_mask)); + dst.b = value_type(uround(src.b * base_mask)); + dst.a = value_type(uround(src.a * base_mask)); + } + + static void convert(rgba8T& dst, const rgba& src) + { + // Use the "float" table. + dst.r = sRGB_conv::rgb_to_sRGB(float(src.r)); + dst.g = sRGB_conv::rgb_to_sRGB(float(src.g)); + dst.b = sRGB_conv::rgb_to_sRGB(float(src.b)); + dst.a = sRGB_conv::alpha_to_sRGB(float(src.a)); + } + + static void convert(rgba& dst, const rgba8T& src) + { + dst.r = src.r / 255.0; + dst.g = src.g / 255.0; + dst.b = src.b / 255.0; + dst.a = src.a / 255.0; + } + + static void convert(rgba& dst, const rgba8T& src) + { + // Use the "float" table. 
+ dst.r = sRGB_conv::rgb_from_sRGB(src.r); + dst.g = sRGB_conv::rgb_from_sRGB(src.g); + dst.b = sRGB_conv::rgb_from_sRGB(src.b); + dst.a = sRGB_conv::alpha_from_sRGB(src.a); + } + + //-------------------------------------------------------------------- + rgba8T() {} + + //-------------------------------------------------------------------- + rgba8T(unsigned r_, unsigned g_, unsigned b_, unsigned a_ = base_mask) : + r(value_type(r_)), + g(value_type(g_)), + b(value_type(b_)), + a(value_type(a_)) {} + + //-------------------------------------------------------------------- + rgba8T(const rgba& c) + { + convert(*this, c); + } + + //-------------------------------------------------------------------- + rgba8T(const self_type& c, unsigned a_) : + r(c.r), g(c.g), b(c.b), a(value_type(a_)) {} + + //-------------------------------------------------------------------- + template + rgba8T(const rgba8T& c) + { + convert(*this, c); + } + + //-------------------------------------------------------------------- + operator rgba() const + { + rgba c; + convert(c, *this); + return c; + } + + //-------------------------------------------------------------------- + static AGG_INLINE double to_double(value_type a) + { + return double(a) / base_mask; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type from_double(double a) + { + return value_type(uround(a * base_mask)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type empty_value() + { + return 0; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type full_value() + { + return base_mask; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_transparent() const + { + return a == 0; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_opaque() const + { + return a == base_mask; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type invert(value_type x) + { + return base_mask - x; + } + + //-------------------------------------------------------------------- + // Fixed-point multiply, exact over int8u. + static AGG_INLINE value_type multiply(value_type a, value_type b) + { + calc_type t = a * b + base_MSB; + return value_type(((t >> base_shift) + t) >> base_shift); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type demultiply(value_type a, value_type b) + { + if (a * b == 0) + { + return 0; + } + else if (a >= b) + { + return base_mask; + } + else return value_type((a * base_mask + (b >> 1)) / b); + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downscale(T a) + { + return a >> base_shift; + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downshift(T a, unsigned n) + { + return a >> n; + } + + //-------------------------------------------------------------------- + // Fixed-point multiply, exact over int8u. + // Specifically for multiplying a color component by a cover. 
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b) + { + return multiply(a, b); + } + + //-------------------------------------------------------------------- + static AGG_INLINE cover_type scale_cover(cover_type a, value_type b) + { + return multiply(b, a); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a, assuming q is premultiplied by a. + static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a) + { + return p + q - multiply(p, a); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a. + static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a) + { + int t = (q - p) * a + base_MSB - (p > q); + return value_type(p + (((t >> base_shift) + t) >> base_shift)); + } + + //-------------------------------------------------------------------- + self_type& clear() + { + r = g = b = a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& transparent() + { + a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& opacity(double a_) + { + if (a_ < 0) a = 0; + else if (a_ > 1) a = 1; + else a = (value_type)uround(a_ * double(base_mask)); + return *this; + } + + //-------------------------------------------------------------------- + double opacity() const + { + return double(a) / double(base_mask); + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& premultiply() + { + if (a != base_mask) + { + if (a == 0) + { + r = g = b = 0; + } + else + { + r = multiply(r, a); + g = multiply(g, a); + b = multiply(b, a); + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& premultiply(unsigned a_) + { + if (a != base_mask || a_ < base_mask) + { + if (a == 0 || a_ == 0) + { + r = g = b = a = 0; + } + else + { + calc_type r_ = (calc_type(r) * a_) / a; + calc_type g_ = (calc_type(g) * a_) / a; + calc_type b_ = (calc_type(b) * a_) / a; + r = value_type((r_ > a_) ? a_ : r_); + g = value_type((g_ > a_) ? a_ : g_); + b = value_type((b_ > a_) ? a_ : b_); + a = value_type(a_); + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& demultiply() + { + if (a < base_mask) + { + if (a == 0) + { + r = g = b = 0; + } + else + { + calc_type r_ = (calc_type(r) * base_mask) / a; + calc_type g_ = (calc_type(g) * base_mask) / a; + calc_type b_ = (calc_type(b) * base_mask) / a; + r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_); + g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_); + b = value_type((b_ > calc_type(base_mask)) ? 
calc_type(base_mask) : b_); + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type gradient(const self_type& c, double k) const + { + self_type ret; + calc_type ik = uround(k * base_mask); + ret.r = lerp(r, c.r, ik); + ret.g = lerp(g, c.g, ik); + ret.b = lerp(b, c.b, ik); + ret.a = lerp(a, c.a, ik); + return ret; + } + + //-------------------------------------------------------------------- + AGG_INLINE void add(const self_type& c, unsigned cover) + { + calc_type cr, cg, cb, ca; + if (cover == cover_mask) + { + if (c.a == base_mask) + { + *this = c; + return; + } + else + { + cr = r + c.r; + cg = g + c.g; + cb = b + c.b; + ca = a + c.a; + } + } + else + { + cr = r + mult_cover(c.r, cover); + cg = g + mult_cover(c.g, cover); + cb = b + mult_cover(c.b, cover); + ca = a + mult_cover(c.a, cover); + } + r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr); + g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg); + b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb); + a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca); + } + + //-------------------------------------------------------------------- + template + AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma) + { + r = gamma.dir(r); + g = gamma.dir(g); + b = gamma.dir(b); + } + + //-------------------------------------------------------------------- + template + AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma) + { + r = gamma.inv(r); + g = gamma.inv(g); + b = gamma.inv(b); + } + + //-------------------------------------------------------------------- + static self_type no_color() { return self_type(0,0,0,0); } + + //-------------------------------------------------------------------- + static self_type from_wavelength(double wl, double gamma = 1.0) + { + return self_type(rgba::from_wavelength(wl, gamma)); + } + }; + + typedef rgba8T rgba8; + typedef rgba8T srgba8; + + + //-------------------------------------------------------------rgb8_packed + inline rgba8 rgb8_packed(unsigned v) + { + return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF); + } + + //-------------------------------------------------------------bgr8_packed + inline rgba8 bgr8_packed(unsigned v) + { + return rgba8(v & 0xFF, (v >> 8) & 0xFF, (v >> 16) & 0xFF); + } + + //------------------------------------------------------------argb8_packed + inline rgba8 argb8_packed(unsigned v) + { + return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF, v >> 24); + } + + //---------------------------------------------------------rgba8_gamma_dir + template + rgba8 rgba8_gamma_dir(rgba8 c, const GammaLUT& gamma) + { + return rgba8(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a); + } + + //---------------------------------------------------------rgba8_gamma_inv + template + rgba8 rgba8_gamma_inv(rgba8 c, const GammaLUT& gamma) + { + return rgba8(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a); + } + + + + //==================================================================rgba16 + struct rgba16 + { + typedef int16u value_type; + typedef int32u calc_type; + typedef int64 long_type; + enum base_scale_e + { + base_shift = 16, + base_scale = 1 << base_shift, + base_mask = base_scale - 1, + base_MSB = 1 << (base_shift - 1) + }; + typedef rgba16 self_type; + + value_type r; + value_type g; + value_type b; + value_type a; + + //-------------------------------------------------------------------- + rgba16() {} + + 
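The rgb8_packed, bgr8_packed and argb8_packed helpers above unpack a single integer such as 0xRRGGBB into an rgba8. A short standalone usage sketch (illustrative only, not part of this patch, and assuming the vendored agg headers are on the include path):

    #include <cassert>
    #include "agg_color_rgba.h"

    int main()
    {
        using namespace agg24markers;

        // 0xRRGGBB with an implicit opaque alpha.
        rgba8 c = rgb8_packed(0xFF8040);
        assert(c.r == 0xFF && c.g == 0x80 && c.b == 0x40 && c.a == 0xFF);

        // 0xAARRGGBB: the top byte becomes the alpha channel.
        rgba8 d = argb8_packed(0x80FF8040);
        assert(d.a == 0x80 && d.r == 0xFF && d.g == 0x80 && d.b == 0x40);
        return 0;
    }
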
//-------------------------------------------------------------------- + rgba16(unsigned r_, unsigned g_, unsigned b_, unsigned a_=base_mask) : + r(value_type(r_)), + g(value_type(g_)), + b(value_type(b_)), + a(value_type(a_)) {} + + //-------------------------------------------------------------------- + rgba16(const self_type& c, unsigned a_) : + r(c.r), g(c.g), b(c.b), a(value_type(a_)) {} + + //-------------------------------------------------------------------- + rgba16(const rgba& c) : + r((value_type)uround(c.r * double(base_mask))), + g((value_type)uround(c.g * double(base_mask))), + b((value_type)uround(c.b * double(base_mask))), + a((value_type)uround(c.a * double(base_mask))) {} + + //-------------------------------------------------------------------- + rgba16(const rgba8& c) : + r(value_type((value_type(c.r) << 8) | c.r)), + g(value_type((value_type(c.g) << 8) | c.g)), + b(value_type((value_type(c.b) << 8) | c.b)), + a(value_type((value_type(c.a) << 8) | c.a)) {} + + //-------------------------------------------------------------------- + rgba16(const srgba8& c) : + r(sRGB_conv::rgb_from_sRGB(c.r)), + g(sRGB_conv::rgb_from_sRGB(c.g)), + b(sRGB_conv::rgb_from_sRGB(c.b)), + a(sRGB_conv::alpha_from_sRGB(c.a)) {} + + //-------------------------------------------------------------------- + operator rgba() const + { + return rgba( + r / 65535.0, + g / 65535.0, + b / 65535.0, + a / 65535.0); + } + + //-------------------------------------------------------------------- + operator rgba8() const + { + return rgba8(r >> 8, g >> 8, b >> 8, a >> 8); + } + + //-------------------------------------------------------------------- + operator srgba8() const + { + // Return (non-premultiplied) sRGB values. + return srgba8( + sRGB_conv::rgb_to_sRGB(r), + sRGB_conv::rgb_to_sRGB(g), + sRGB_conv::rgb_to_sRGB(b), + sRGB_conv::alpha_to_sRGB(a)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE double to_double(value_type a) + { + return double(a) / base_mask; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type from_double(double a) + { + return value_type(uround(a * base_mask)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type empty_value() + { + return 0; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type full_value() + { + return base_mask; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_transparent() const + { + return a == 0; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_opaque() const + { + return a == base_mask; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type invert(value_type x) + { + return base_mask - x; + } + + //-------------------------------------------------------------------- + // Fixed-point multiply, exact over int16u. 
+ static AGG_INLINE value_type multiply(value_type a, value_type b) + { + calc_type t = a * b + base_MSB; + return value_type(((t >> base_shift) + t) >> base_shift); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type demultiply(value_type a, value_type b) + { + if (a * b == 0) + { + return 0; + } + else if (a >= b) + { + return base_mask; + } + else return value_type((a * base_mask + (b >> 1)) / b); + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downscale(T a) + { + return a >> base_shift; + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downshift(T a, unsigned n) + { + return a >> n; + } + + //-------------------------------------------------------------------- + // Fixed-point multiply, almost exact over int16u. + // Specifically for multiplying a color component by a cover. + static AGG_INLINE value_type mult_cover(value_type a, cover_type b) + { + return multiply(a, (b << 8) | b); + } + + //-------------------------------------------------------------------- + static AGG_INLINE cover_type scale_cover(cover_type a, value_type b) + { + return multiply((a << 8) | a, b) >> 8; + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a, assuming q is premultiplied by a. + static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a) + { + return p + q - multiply(p, a); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a. + static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a) + { + int t = (q - p) * a + base_MSB - (p > q); + return value_type(p + (((t >> base_shift) + t) >> base_shift)); + } + + //-------------------------------------------------------------------- + self_type& clear() + { + r = g = b = a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& transparent() + { + a = 0; + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& opacity(double a_) + { + if (a_ < 0) a = 0; + if (a_ > 1) a = 1; + a = value_type(uround(a_ * double(base_mask))); + return *this; + } + + //-------------------------------------------------------------------- + double opacity() const + { + return double(a) / double(base_mask); + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& premultiply() + { + if (a != base_mask) + { + if (a == 0) + { + r = g = b = 0; + } + else + { + r = multiply(r, a); + g = multiply(g, a); + b = multiply(b, a); + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& premultiply(unsigned a_) + { + if (a < base_mask || a_ < base_mask) + { + if (a == 0 || a_ == 0) + { + r = g = b = a = 0; + } + else + { + calc_type r_ = (calc_type(r) * a_) / a; + calc_type g_ = (calc_type(g) * a_) / a; + calc_type b_ = (calc_type(b) * a_) / a; + r = value_type((r_ > a_) ? a_ : r_); + g = value_type((g_ > a_) ? a_ : g_); + b = value_type((b_ > a_) ? 
a_ : b_); + a = value_type(a_); + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& demultiply() + { + if (a < base_mask) + { + if (a == 0) + { + r = g = b = 0; + } + else + { + calc_type r_ = (calc_type(r) * base_mask) / a; + calc_type g_ = (calc_type(g) * base_mask) / a; + calc_type b_ = (calc_type(b) * base_mask) / a; + r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_); + g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_); + b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_); + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type gradient(const self_type& c, double k) const + { + self_type ret; + calc_type ik = uround(k * base_mask); + ret.r = lerp(r, c.r, ik); + ret.g = lerp(g, c.g, ik); + ret.b = lerp(b, c.b, ik); + ret.a = lerp(a, c.a, ik); + return ret; + } + + //-------------------------------------------------------------------- + AGG_INLINE void add(const self_type& c, unsigned cover) + { + calc_type cr, cg, cb, ca; + if (cover == cover_mask) + { + if (c.a == base_mask) + { + *this = c; + return; + } + else + { + cr = r + c.r; + cg = g + c.g; + cb = b + c.b; + ca = a + c.a; + } + } + else + { + cr = r + mult_cover(c.r, cover); + cg = g + mult_cover(c.g, cover); + cb = b + mult_cover(c.b, cover); + ca = a + mult_cover(c.a, cover); + } + r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr); + g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg); + b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb); + a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca); + } + + //-------------------------------------------------------------------- + template + AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma) + { + r = gamma.dir(r); + g = gamma.dir(g); + b = gamma.dir(b); + } + + //-------------------------------------------------------------------- + template + AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma) + { + r = gamma.inv(r); + g = gamma.inv(g); + b = gamma.inv(b); + } + + //-------------------------------------------------------------------- + static self_type no_color() { return self_type(0,0,0,0); } + + //-------------------------------------------------------------------- + static self_type from_wavelength(double wl, double gamma = 1.0) + { + return self_type(rgba::from_wavelength(wl, gamma)); + } + }; + + + //------------------------------------------------------rgba16_gamma_dir + template + rgba16 rgba16_gamma_dir(rgba16 c, const GammaLUT& gamma) + { + return rgba16(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a); + } + + //------------------------------------------------------rgba16_gamma_inv + template + rgba16 rgba16_gamma_inv(rgba16 c, const GammaLUT& gamma) + { + return rgba16(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a); + } + + //====================================================================rgba32 + struct rgba32 + { + typedef float value_type; + typedef double calc_type; + typedef double long_type; + typedef rgba32 self_type; + + value_type r; + value_type g; + value_type b; + value_type a; + + //-------------------------------------------------------------------- + rgba32() {} + + //-------------------------------------------------------------------- + rgba32(value_type r_, value_type g_, value_type b_, value_type a_= 1) : + r(r_), g(g_), b(b_), a(a_) {} + 
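rgba16::premultiply() and demultiply() above scale the color channels by alpha and back again. A standalone sketch (illustrative only, not part of this patch, and assuming the vendored agg headers are on the include path) showing that the round trip is only approximate for integer channels because of rounding:

    #include <cstdio>
    #include "agg_color_rgba.h"

    int main()
    {
        using namespace agg24markers;

        rgba16 c(40000, 20000, 10000, 32768);  // roughly half-opaque
        rgba16 p = c;
        p.premultiply();   // each channel becomes multiply(channel, alpha)
        p.demultiply();    // scaled back up by base_mask / alpha, clamped

        // Values come back within a small rounding error of the originals.
        std::printf("r: %u -> %u\n", unsigned(c.r), unsigned(p.r));
        std::printf("g: %u -> %u\n", unsigned(c.g), unsigned(p.g));
        std::printf("b: %u -> %u\n", unsigned(c.b), unsigned(p.b));
        return 0;
    }
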
+ //-------------------------------------------------------------------- + rgba32(const self_type& c, float a_) : + r(c.r), g(c.g), b(c.b), a(a_) {} + + //-------------------------------------------------------------------- + rgba32(const rgba& c) : + r(value_type(c.r)), g(value_type(c.g)), b(value_type(c.b)), a(value_type(c.a)) {} + + //-------------------------------------------------------------------- + rgba32(const rgba8& c) : + r(value_type(c.r / 255.0)), + g(value_type(c.g / 255.0)), + b(value_type(c.b / 255.0)), + a(value_type(c.a / 255.0)) {} + + //-------------------------------------------------------------------- + rgba32(const srgba8& c) : + r(sRGB_conv::rgb_from_sRGB(c.r)), + g(sRGB_conv::rgb_from_sRGB(c.g)), + b(sRGB_conv::rgb_from_sRGB(c.b)), + a(sRGB_conv::alpha_from_sRGB(c.a)) {} + + //-------------------------------------------------------------------- + rgba32(const rgba16& c) : + r(value_type(c.r / 65535.0)), + g(value_type(c.g / 65535.0)), + b(value_type(c.b / 65535.0)), + a(value_type(c.a / 65535.0)) {} + + //-------------------------------------------------------------------- + operator rgba() const + { + return rgba(r, g, b, a); + } + + //-------------------------------------------------------------------- + operator rgba8() const + { + return rgba8( + uround(r * 255.0), + uround(g * 255.0), + uround(b * 255.0), + uround(a * 255.0)); + } + + //-------------------------------------------------------------------- + operator srgba8() const + { + return srgba8( + sRGB_conv::rgb_to_sRGB(r), + sRGB_conv::rgb_to_sRGB(g), + sRGB_conv::rgb_to_sRGB(b), + sRGB_conv::alpha_to_sRGB(a)); + } + + //-------------------------------------------------------------------- + operator rgba16() const + { + return rgba8( + uround(r * 65535.0), + uround(g * 65535.0), + uround(b * 65535.0), + uround(a * 65535.0)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE double to_double(value_type a) + { + return a; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type from_double(double a) + { + return value_type(a); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type empty_value() + { + return 0; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type full_value() + { + return 1; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_transparent() const + { + return a <= 0; + } + + //-------------------------------------------------------------------- + AGG_INLINE bool is_opaque() const + { + return a >= 1; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type invert(value_type x) + { + return 1 - x; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type multiply(value_type a, value_type b) + { + return value_type(a * b); + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type demultiply(value_type a, value_type b) + { + return (b == 0) ? 0 : value_type(a / b); + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downscale(T a) + { + return a; + } + + //-------------------------------------------------------------------- + template + static AGG_INLINE T downshift(T a, unsigned n) + { + return n > 0 ? 
a / (1 << n) : a; + } + + //-------------------------------------------------------------------- + static AGG_INLINE value_type mult_cover(value_type a, cover_type b) + { + return value_type(a * b / cover_mask); + } + + //-------------------------------------------------------------------- + static AGG_INLINE cover_type scale_cover(cover_type a, value_type b) + { + return cover_type(uround(a * b)); + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a, assuming q is premultiplied by a. + static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a) + { + return (1 - a) * p + q; // more accurate than "p + q - p * a" + } + + //-------------------------------------------------------------------- + // Interpolate p to q by a. + static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a) + { + // The form "p + a * (q - p)" avoids a multiplication, but may produce an + // inaccurate result. For example, "p + (q - p)" may not be exactly equal + // to q. Therefore, stick to the basic expression, which at least produces + // the correct result at either extreme. + return (1 - a) * p + a * q; + } + + //-------------------------------------------------------------------- + self_type& clear() + { + r = g = b = a = 0; + return *this; + } + + //-------------------------------------------------------------------- + self_type& transparent() + { + a = 0; + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& opacity(double a_) + { + if (a_ < 0) a = 0; + else if (a_ > 1) a = 1; + else a = value_type(a_); + return *this; + } + + //-------------------------------------------------------------------- + double opacity() const + { + return a; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& premultiply() + { + if (a < 1) + { + if (a <= 0) + { + r = g = b = 0; + } + else + { + r *= a; + g *= a; + b *= a; + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type& demultiply() + { + if (a < 1) + { + if (a <= 0) + { + r = g = b = 0; + } + else + { + r /= a; + g /= a; + b /= a; + } + } + return *this; + } + + //-------------------------------------------------------------------- + AGG_INLINE self_type gradient(const self_type& c, double k) const + { + self_type ret; + ret.r = value_type(r + (c.r - r) * k); + ret.g = value_type(g + (c.g - g) * k); + ret.b = value_type(b + (c.b - b) * k); + ret.a = value_type(a + (c.a - a) * k); + return ret; + } + + //-------------------------------------------------------------------- + AGG_INLINE void add(const self_type& c, unsigned cover) + { + if (cover == cover_mask) + { + if (c.is_opaque()) + { + *this = c; + return; + } + else + { + r += c.r; + g += c.g; + b += c.b; + a += c.a; + } + } + else + { + r += mult_cover(c.r, cover); + g += mult_cover(c.g, cover); + b += mult_cover(c.b, cover); + a += mult_cover(c.a, cover); + } + if (a > 1) a = 1; + if (r > a) r = a; + if (g > a) g = a; + if (b > a) b = a; + } + + //-------------------------------------------------------------------- + template + AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma) + { + r = gamma.dir(r); + g = gamma.dir(g); + b = gamma.dir(b); + } + + //-------------------------------------------------------------------- + template + AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma) + { + r = gamma.inv(r); + g = gamma.inv(g); + b = 
gamma.inv(b); + } + + //-------------------------------------------------------------------- + static self_type no_color() { return self_type(0,0,0,0); } + + //-------------------------------------------------------------------- + static self_type from_wavelength(double wl, double gamma = 1) + { + return self_type(rgba::from_wavelength(wl, gamma)); + } + }; +} + + + +#endif diff --git a/kiva/markers/agg/agg_config.h b/kiva/markers/agg/agg_config.h new file mode 100644 index 000000000..a1c6c8a8e --- /dev/null +++ b/kiva/markers/agg/agg_config.h @@ -0,0 +1,44 @@ +#ifndef AGG_CONFIG_INCLUDED +#define AGG_CONFIG_INCLUDED + +// This file can be used to redefine certain data types. + +//--------------------------------------- +// 1. Default basic types such as: +// +// AGG_INT8 +// AGG_INT8U +// AGG_INT16 +// AGG_INT16U +// AGG_INT32 +// AGG_INT32U +// AGG_INT64 +// AGG_INT64U +// +// Just replace this file with new defines if necessary. +// For example, if your compiler doesn't have a 64 bit integer type +// you can still use AGG if you define the follows: +// +// #define AGG_INT64 int +// #define AGG_INT64U unsigned +// +// It will result in overflow in 16 bit-per-component image/pattern resampling +// but it won't result any crash and the rest of the library will remain +// fully functional. + + +//--------------------------------------- +// 2. Default rendering_buffer type. Can be: +// +// Provides faster access for massive pixel operations, +// such as blur, image filtering: +// #define AGG_RENDERING_BUFFER row_ptr_cache +// +// Provides cheaper creation and destruction (no mem allocs): +// #define AGG_RENDERING_BUFFER row_accessor +// +// You can still use both of them simultaneously in your applications +// This #define is used only for default rendering_buffer type, +// in short hand typedefs like pixfmt_rgba32. + +#endif diff --git a/kiva/markers/agg/agg_dda_line.h b/kiva/markers/agg/agg_dda_line.h new file mode 100644 index 000000000..01880e330 --- /dev/null +++ b/kiva/markers/agg/agg_dda_line.h @@ -0,0 +1,290 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. 
+// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// classes dda_line_interpolator, dda2_line_interpolator +// +//---------------------------------------------------------------------------- + +#ifndef AGG_DDA_LINE_INCLUDED +#define AGG_DDA_LINE_INCLUDED + +#include +#include "agg_basics.h" + +namespace agg24markers +{ + + //===================================================dda_line_interpolator + template class dda_line_interpolator + { + public: + //-------------------------------------------------------------------- + dda_line_interpolator() {} + + //-------------------------------------------------------------------- + dda_line_interpolator(int y1, int y2, unsigned count) : + m_y(y1), + m_inc(((y2 - y1) << FractionShift) / int(count)), + m_dy(0) + { + } + + //-------------------------------------------------------------------- + void operator ++ () + { + m_dy += m_inc; + } + + //-------------------------------------------------------------------- + void operator -- () + { + m_dy -= m_inc; + } + + //-------------------------------------------------------------------- + void operator += (unsigned n) + { + m_dy += m_inc * n; + } + + //-------------------------------------------------------------------- + void operator -= (unsigned n) + { + m_dy -= m_inc * n; + } + + + //-------------------------------------------------------------------- + int y() const { return m_y + (m_dy >> (FractionShift-YShift)); } + int dy() const { return m_dy; } + + + private: + int m_y; + int m_inc; + int m_dy; + }; + + + + + + //=================================================dda2_line_interpolator + class dda2_line_interpolator + { + public: + typedef int save_data_type; + enum save_size_e { save_size = 2 }; + + //-------------------------------------------------------------------- + dda2_line_interpolator() {} + + //-------------------------------------------- Forward-adjusted line + dda2_line_interpolator(int y1, int y2, int count) : + m_cnt(count <= 0 ? 1 : count), + m_lft((y2 - y1) / m_cnt), + m_rem((y2 - y1) % m_cnt), + m_mod(m_rem), + m_y(y1) + { + if(m_mod <= 0) + { + m_mod += count; + m_rem += count; + m_lft--; + } + m_mod -= count; + } + + //-------------------------------------------- Backward-adjusted line + dda2_line_interpolator(int y1, int y2, int count, int) : + m_cnt(count <= 0 ? 1 : count), + m_lft((y2 - y1) / m_cnt), + m_rem((y2 - y1) % m_cnt), + m_mod(m_rem), + m_y(y1) + { + if(m_mod <= 0) + { + m_mod += count; + m_rem += count; + m_lft--; + } + } + + //-------------------------------------------- Backward-adjusted line + dda2_line_interpolator(int y, int count) : + m_cnt(count <= 0 ? 
1 : count), + m_lft(y / m_cnt), + m_rem(y % m_cnt), + m_mod(m_rem), + m_y(0) + { + if(m_mod <= 0) + { + m_mod += count; + m_rem += count; + m_lft--; + } + } + + + //-------------------------------------------------------------------- + void save(save_data_type* data) const + { + data[0] = m_mod; + data[1] = m_y; + } + + //-------------------------------------------------------------------- + void load(const save_data_type* data) + { + m_mod = data[0]; + m_y = data[1]; + } + + //-------------------------------------------------------------------- + void operator++() + { + m_mod += m_rem; + m_y += m_lft; + if(m_mod > 0) + { + m_mod -= m_cnt; + m_y++; + } + } + + //-------------------------------------------------------------------- + void operator--() + { + if(m_mod <= m_rem) + { + m_mod += m_cnt; + m_y--; + } + m_mod -= m_rem; + m_y -= m_lft; + } + + //-------------------------------------------------------------------- + void adjust_forward() + { + m_mod -= m_cnt; + } + + //-------------------------------------------------------------------- + void adjust_backward() + { + m_mod += m_cnt; + } + + //-------------------------------------------------------------------- + int mod() const { return m_mod; } + int rem() const { return m_rem; } + int lft() const { return m_lft; } + + //-------------------------------------------------------------------- + int y() const { return m_y; } + + private: + int m_cnt; + int m_lft; + int m_rem; + int m_mod; + int m_y; + }; + + + + + + + + //---------------------------------------------line_bresenham_interpolator + class line_bresenham_interpolator + { + public: + enum subpixel_scale_e + { + subpixel_shift = 8, + subpixel_scale = 1 << subpixel_shift, + subpixel_mask = subpixel_scale - 1 + }; + + //-------------------------------------------------------------------- + static int line_lr(int v) { return v >> subpixel_shift; } + + //-------------------------------------------------------------------- + line_bresenham_interpolator(int x1, int y1, int x2, int y2) : + m_x1_lr(line_lr(x1)), + m_y1_lr(line_lr(y1)), + m_x2_lr(line_lr(x2)), + m_y2_lr(line_lr(y2)), + m_ver(abs(m_x2_lr - m_x1_lr) < abs(m_y2_lr - m_y1_lr)), + m_len(m_ver ? abs(m_y2_lr - m_y1_lr) : + abs(m_x2_lr - m_x1_lr)), + m_inc(m_ver ? ((y2 > y1) ? 1 : -1) : ((x2 > x1) ? 1 : -1)), + m_interpolator(m_ver ? x1 : y1, + m_ver ? 
x2 : y2, + m_len) + { + } + + //-------------------------------------------------------------------- + bool is_ver() const { return m_ver; } + unsigned len() const { return m_len; } + int inc() const { return m_inc; } + + //-------------------------------------------------------------------- + void hstep() + { + ++m_interpolator; + m_x1_lr += m_inc; + } + + //-------------------------------------------------------------------- + void vstep() + { + ++m_interpolator; + m_y1_lr += m_inc; + } + + //-------------------------------------------------------------------- + int x1() const { return m_x1_lr; } + int y1() const { return m_y1_lr; } + int x2() const { return line_lr(m_interpolator.y()); } + int y2() const { return line_lr(m_interpolator.y()); } + int x2_hr() const { return m_interpolator.y(); } + int y2_hr() const { return m_interpolator.y(); } + + private: + int m_x1_lr; + int m_y1_lr; + int m_x2_lr; + int m_y2_lr; + bool m_ver; + unsigned m_len; + int m_inc; + dda2_line_interpolator m_interpolator; + + }; + + +} + + + +#endif diff --git a/kiva/markers/agg/agg_ellipse_bresenham.h b/kiva/markers/agg/agg_ellipse_bresenham.h new file mode 100644 index 000000000..c658bf405 --- /dev/null +++ b/kiva/markers/agg/agg_ellipse_bresenham.h @@ -0,0 +1,113 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// Simple Bresenham interpolator for ellipsees +// +//---------------------------------------------------------------------------- + +#ifndef AGG_ELLIPSE_BRESENHAM_INCLUDED +#define AGG_ELLIPSE_BRESENHAM_INCLUDED + + +#include "agg_basics.h" + + +namespace agg24markers +{ + + //------------------------------------------ellipse_bresenham_interpolator + class ellipse_bresenham_interpolator + { + public: + ellipse_bresenham_interpolator(int rx, int ry) : + m_rx2(rx * rx), + m_ry2(ry * ry), + m_two_rx2(m_rx2 << 1), + m_two_ry2(m_ry2 << 1), + m_dx(0), + m_dy(0), + m_inc_x(0), + m_inc_y(-ry * m_two_rx2), + m_cur_f(0) + {} + + int dx() const { return m_dx; } + int dy() const { return m_dy; } + + void operator++ () + { + int mx, my, mxy, min_m; + int fx, fy, fxy; + + mx = fx = m_cur_f + m_inc_x + m_ry2; + if(mx < 0) mx = -mx; + + my = fy = m_cur_f + m_inc_y + m_rx2; + if(my < 0) my = -my; + + mxy = fxy = m_cur_f + m_inc_x + m_ry2 + m_inc_y + m_rx2; + if(mxy < 0) mxy = -mxy; + + min_m = mx; + bool flag = true; + + if(min_m > my) + { + min_m = my; + flag = false; + } + + m_dx = m_dy = 0; + + if(min_m > mxy) + { + m_inc_x += m_two_ry2; + m_inc_y += m_two_rx2; + m_cur_f = fxy; + m_dx = 1; + m_dy = 1; + return; + } + + if(flag) + { + m_inc_x += m_two_ry2; + m_cur_f = fx; + m_dx = 1; + return; + } + + m_inc_y += m_two_rx2; + m_cur_f = fy; + m_dy = 1; + } + + private: + int m_rx2; + int m_ry2; + int m_two_rx2; + int m_two_ry2; + int m_dx; + int m_dy; + int m_inc_x; + int m_inc_y; + int m_cur_f; + + }; + +} + +#endif + diff --git 
a/kiva/markers/agg/agg_gamma_functions.h b/kiva/markers/agg/agg_gamma_functions.h new file mode 100644 index 000000000..61cd391f9 --- /dev/null +++ b/kiva/markers/agg/agg_gamma_functions.h @@ -0,0 +1,132 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- + +#ifndef AGG_GAMMA_FUNCTIONS_INCLUDED +#define AGG_GAMMA_FUNCTIONS_INCLUDED + +#include +#include "agg_basics.h" + +namespace agg24markers +{ + //===============================================================gamma_none + struct gamma_none + { + double operator()(double x) const { return x; } + }; + + + //==============================================================gamma_power + class gamma_power + { + public: + gamma_power() : m_gamma(1.0) {} + gamma_power(double g) : m_gamma(g) {} + + void gamma(double g) { m_gamma = g; } + double gamma() const { return m_gamma; } + + double operator() (double x) const + { + return pow(x, m_gamma); + } + + private: + double m_gamma; + }; + + + //==========================================================gamma_threshold + class gamma_threshold + { + public: + gamma_threshold() : m_threshold(0.5) {} + gamma_threshold(double t) : m_threshold(t) {} + + void threshold(double t) { m_threshold = t; } + double threshold() const { return m_threshold; } + + double operator() (double x) const + { + return (x < m_threshold) ? 0.0 : 1.0; + } + + private: + double m_threshold; + }; + + + //============================================================gamma_linear + class gamma_linear + { + public: + gamma_linear() : m_start(0.0), m_end(1.0) {} + gamma_linear(double s, double e) : m_start(s), m_end(e) {} + + void set(double s, double e) { m_start = s; m_end = e; } + void start(double s) { m_start = s; } + void end(double e) { m_end = e; } + double start() const { return m_start; } + double end() const { return m_end; } + + double operator() (double x) const + { + if(x < m_start) return 0.0; + if(x > m_end) return 1.0; + return (x - m_start) / (m_end - m_start); + } + + private: + double m_start; + double m_end; + }; + + + //==========================================================gamma_multiply + class gamma_multiply + { + public: + gamma_multiply() : m_mul(1.0) {} + gamma_multiply(double v) : m_mul(v) {} + + void value(double v) { m_mul = v; } + double value() const { return m_mul; } + + double operator() (double x) const + { + double y = x * m_mul; + if(y > 1.0) y = 1.0; + return y; + } + + private: + double m_mul; + }; + + inline double sRGB_to_linear(double x) + { + return (x <= 0.04045) ? (x / 12.92) : pow((x + 0.055) / (1.055), 2.4); + } + + inline double linear_to_sRGB(double x) + { + return (x <= 0.0031308) ? 
(x * 12.92) : (1.055 * pow(x, 1 / 2.4) - 0.055); + } +} + +#endif + + + diff --git a/kiva/markers/agg/agg_gamma_lut.h b/kiva/markers/agg/agg_gamma_lut.h new file mode 100644 index 000000000..0077e7e78 --- /dev/null +++ b/kiva/markers/agg/agg_gamma_lut.h @@ -0,0 +1,305 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- + +#ifndef AGG_GAMMA_LUT_INCLUDED +#define AGG_GAMMA_LUT_INCLUDED + +#include +#include "agg_basics.h" +#include "agg_gamma_functions.h" + +namespace agg24markers +{ + template class gamma_lut + { + public: + typedef gamma_lut self_type; + + enum gamma_scale_e + { + gamma_shift = GammaShift, + gamma_size = 1 << gamma_shift, + gamma_mask = gamma_size - 1 + }; + + enum hi_res_scale_e + { + hi_res_shift = HiResShift, + hi_res_size = 1 << hi_res_shift, + hi_res_mask = hi_res_size - 1 + }; + + ~gamma_lut() + { + pod_allocator::deallocate(m_inv_gamma, hi_res_size); + pod_allocator::deallocate(m_dir_gamma, gamma_size); + } + + gamma_lut() : + m_gamma(1.0), + m_dir_gamma(pod_allocator::allocate(gamma_size)), + m_inv_gamma(pod_allocator::allocate(hi_res_size)) + { + unsigned i; + for(i = 0; i < gamma_size; i++) + { + m_dir_gamma[i] = HiResT(i << (hi_res_shift - gamma_shift)); + } + + for(i = 0; i < hi_res_size; i++) + { + m_inv_gamma[i] = LoResT(i >> (hi_res_shift - gamma_shift)); + } + } + + gamma_lut(double g) : + m_gamma(1.0), + m_dir_gamma(pod_allocator::allocate(gamma_size)), + m_inv_gamma(pod_allocator::allocate(hi_res_size)) + { + gamma(g); + } + + void gamma(double g) + { + m_gamma = g; + + unsigned i; + for(i = 0; i < gamma_size; i++) + { + m_dir_gamma[i] = (HiResT) + uround(pow(i / double(gamma_mask), m_gamma) * double(hi_res_mask)); + } + + double inv_g = 1.0 / g; + for(i = 0; i < hi_res_size; i++) + { + m_inv_gamma[i] = (LoResT) + uround(pow(i / double(hi_res_mask), inv_g) * double(gamma_mask)); + } + } + + double gamma() const + { + return m_gamma; + } + + HiResT dir(LoResT v) const + { + return m_dir_gamma[unsigned(v)]; + } + + LoResT inv(HiResT v) const + { + return m_inv_gamma[unsigned(v)]; + } + + private: + gamma_lut(const self_type&); + const self_type& operator = (const self_type&); + + double m_gamma; + HiResT* m_dir_gamma; + LoResT* m_inv_gamma; + }; + + // + // sRGB support classes + // + + // Optimized sRGB lookup table. The direct conversion (sRGB to linear) + // is a straightforward lookup. The inverse conversion (linear to sRGB) + // is implemented using binary search. + template + class sRGB_lut_base + { + public: + LinearType dir(int8u v) const + { + return m_dir_table[v]; + } + + int8u inv(LinearType v) const + { + // Unrolled binary search. 
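+            // Each m_inv_table entry holds the linear value half a step below its
+            // sRGB code, so choosing the largest entry below v rounds v to the
+            // nearest 8-bit sRGB code.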
+ int8u x = 0; + if (v > m_inv_table[128]) x = 128; + if (v > m_inv_table[x + 64]) x += 64; + if (v > m_inv_table[x + 32]) x += 32; + if (v > m_inv_table[x + 16]) x += 16; + if (v > m_inv_table[x + 8]) x += 8; + if (v > m_inv_table[x + 4]) x += 4; + if (v > m_inv_table[x + 2]) x += 2; + if (v > m_inv_table[x + 1]) x += 1; + return x; + } + + protected: + LinearType m_dir_table[256]; + LinearType m_inv_table[256]; + + // Only derived classes may instantiate. + sRGB_lut_base() + { + } + }; + + // sRGB_lut - implements sRGB conversion for the various types. + // Base template is undefined, specializations are provided below. + template + class sRGB_lut; + + template<> + class sRGB_lut : public sRGB_lut_base + { + public: + sRGB_lut() + { + // Generate lookup tables. + m_dir_table[0] = 0; + m_inv_table[0] = 0; + for (unsigned i = 1; i <= 255; ++i) + { + // Floating-point RGB is in range [0,1]. + m_dir_table[i] = float(sRGB_to_linear(i / 255.0)); + m_inv_table[i] = float(sRGB_to_linear((i - 0.5) / 255.0)); + } + } + }; + + template<> + class sRGB_lut : public sRGB_lut_base + { + public: + sRGB_lut() + { + // Generate lookup tables. + m_dir_table[0] = 0; + m_inv_table[0] = 0; + for (unsigned i = 1; i <= 255; ++i) + { + // 16-bit RGB is in range [0,65535]. + m_dir_table[i] = uround(65535.0 * sRGB_to_linear(i / 255.0)); + m_inv_table[i] = uround(65535.0 * sRGB_to_linear((i - 0.5) / 255.0)); + } + } + }; + + template<> + class sRGB_lut : public sRGB_lut_base + { + public: + sRGB_lut() + { + // Generate lookup tables. + m_dir_table[0] = 0; + m_inv_table[0] = 0; + for (unsigned i = 1; i <= 255; ++i) + { + // 8-bit RGB is handled with simple bidirectional lookup tables. + m_dir_table[i] = uround(255.0 * sRGB_to_linear(i / 255.0)); + m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 255.0)); + } + } + + int8u inv(int8u v) const + { + // In this case, the inverse transform is a simple lookup. + return m_inv_table[v]; + } + }; + + // Common base class for sRGB_conv objects. Defines an internal + // sRGB_lut object so that users don't have to. + template + class sRGB_conv_base + { + public: + static T rgb_from_sRGB(int8u x) + { + return lut.dir(x); + } + + static int8u rgb_to_sRGB(T x) + { + return lut.inv(x); + } + + private: + static sRGB_lut lut; + }; + + // Definition of sRGB_conv_base::lut. Due to the fact that this a template, + // we don't need to place the definition in a cpp file. Hurrah. + template + sRGB_lut sRGB_conv_base::lut; + + // Wrapper for sRGB-linear conversion. + // Base template is undefined, specializations are provided below. 
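+    // Alpha is stored linearly in sRGB pixel formats, so the alpha_from_sRGB /
+    // alpha_to_sRGB helpers in the specializations below only rescale between
+    // 8-bit alpha and the linear component type; no gamma curve is applied.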
+ template + class sRGB_conv; + + template<> + class sRGB_conv : public sRGB_conv_base + { + public: + static float alpha_from_sRGB(int8u x) + { + return float(x / 255.0); + } + + static int8u alpha_to_sRGB(float x) + { + if (x <= 0) return 0; + else if (x >= 1) return 255; + else return int8u(0.5 + x * 255); + } + }; + + template<> + class sRGB_conv : public sRGB_conv_base + { + public: + static int16u alpha_from_sRGB(int8u x) + { + return (x << 8) | x; + } + + static int8u alpha_to_sRGB(int16u x) + { + return x >> 8; + } + }; + + template<> + class sRGB_conv : public sRGB_conv_base + { + public: + static int8u alpha_from_sRGB(int8u x) + { + return x; + } + + static int8u alpha_to_sRGB(int8u x) + { + return x; + } + }; +} + +#endif diff --git a/kiva/markers/agg/agg_pixfmt_base.h b/kiva/markers/agg/agg_pixfmt_base.h new file mode 100644 index 000000000..fe04b6b93 --- /dev/null +++ b/kiva/markers/agg/agg_pixfmt_base.h @@ -0,0 +1,97 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- + +#ifndef AGG_PIXFMT_BASE_INCLUDED +#define AGG_PIXFMT_BASE_INCLUDED + +#include "agg_basics.h" +#include "agg_color_gray.h" +#include "agg_color_rgba.h" + +namespace agg24markers +{ + struct pixfmt_gray_tag + { + }; + + struct pixfmt_rgb_tag + { + }; + + struct pixfmt_rgba_tag + { + }; + + //--------------------------------------------------------------blender_base + template + struct blender_base + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + + static rgba get(value_type r, value_type g, value_type b, value_type a, cover_type cover = cover_full) + { + if (cover > cover_none) + { + rgba c( + color_type::to_double(r), + color_type::to_double(g), + color_type::to_double(b), + color_type::to_double(a)); + + if (cover < cover_full) + { + double x = double(cover) / cover_full; + c.r *= x; + c.g *= x; + c.b *= x; + c.a *= x; + } + + return c; + } + else return rgba::no_color(); + } + + static rgba get(const value_type* p, cover_type cover = cover_full) + { + return get( + p[order_type::R], + p[order_type::G], + p[order_type::B], + p[order_type::A], + cover); + } + + static void set(value_type* p, value_type r, value_type g, value_type b, value_type a) + { + p[order_type::R] = r; + p[order_type::G] = g; + p[order_type::B] = b; + p[order_type::A] = a; + } + + static void set(value_type* p, const rgba& c) + { + p[order_type::R] = color_type::from_double(c.r); + p[order_type::G] = color_type::from_double(c.g); + p[order_type::B] = color_type::from_double(c.b); + p[order_type::A] = color_type::from_double(c.a); + } + }; +} + +#endif diff --git a/kiva/markers/agg/agg_pixfmt_rgb.h b/kiva/markers/agg/agg_pixfmt_rgb.h new file mode 100644 index 000000000..5133e55b2 --- /dev/null +++ b/kiva/markers/agg/agg_pixfmt_rgb.h @@ -0,0 +1,995 @@ 
+//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// Adaptation for high precision colors has been sponsored by +// Liberty Technology Systems, Inc., visit http://lib-sys.com +// +// Liberty Technology Systems, Inc. is the provider of +// PostScript and PDF technology for software developers. +// +//---------------------------------------------------------------------------- + +#ifndef AGG_PIXFMT_RGB_INCLUDED +#define AGG_PIXFMT_RGB_INCLUDED + +#include +#include "agg_pixfmt_base.h" +#include "agg_rendering_buffer.h" + +namespace agg24markers +{ + + //=====================================================apply_gamma_dir_rgb + template class apply_gamma_dir_rgb + { + public: + typedef typename ColorT::value_type value_type; + + apply_gamma_dir_rgb(const GammaLut& gamma) : m_gamma(gamma) {} + + AGG_INLINE void operator () (value_type* p) + { + p[Order::R] = m_gamma.dir(p[Order::R]); + p[Order::G] = m_gamma.dir(p[Order::G]); + p[Order::B] = m_gamma.dir(p[Order::B]); + } + + private: + const GammaLut& m_gamma; + }; + + + + //=====================================================apply_gamma_inv_rgb + template class apply_gamma_inv_rgb + { + public: + typedef typename ColorT::value_type value_type; + + apply_gamma_inv_rgb(const GammaLut& gamma) : m_gamma(gamma) {} + + AGG_INLINE void operator () (value_type* p) + { + p[Order::R] = m_gamma.inv(p[Order::R]); + p[Order::G] = m_gamma.inv(p[Order::G]); + p[Order::B] = m_gamma.inv(p[Order::B]); + } + + private: + const GammaLut& m_gamma; + }; + + + //=========================================================blender_rgb + template + struct blender_rgb + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's + // compositing function. Since the render buffer is opaque we skip the + // initial premultiply and final demultiply. 
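+        // Per channel this reduces to d' = lerp(d, s, sa) = (1 - sa) * d + sa * s.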
+ + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover) + { + blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha) + { + p[Order::R] = color_type::lerp(p[Order::R], cr, alpha); + p[Order::G] = color_type::lerp(p[Order::G], cg, alpha); + p[Order::B] = color_type::lerp(p[Order::B], cb, alpha); + } + }; + + //======================================================blender_rgb_pre + template + struct blender_rgb_pre + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + // Blend pixels using the premultiplied form of Alvy-Ray Smith's + // compositing function. + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover) + { + blend_pix(p, + color_type::mult_cover(cr, cover), + color_type::mult_cover(cg, cover), + color_type::mult_cover(cb, cover), + color_type::mult_cover(alpha, cover)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha) + { + p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha); + p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha); + p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha); + } + }; + + //===================================================blender_rgb_gamma + template + class blender_rgb_gamma : public blender_base + { + public: + typedef ColorT color_type; + typedef Order order_type; + typedef Gamma gamma_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + //-------------------------------------------------------------------- + blender_rgb_gamma() : m_gamma(0) {} + void gamma(const gamma_type& g) { m_gamma = &g; } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover) + { + blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover)); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha) + { + calc_type r = m_gamma->dir(p[Order::R]); + calc_type g = m_gamma->dir(p[Order::G]); + calc_type b = m_gamma->dir(p[Order::B]); + p[Order::R] = m_gamma->inv(color_type::downscale((m_gamma->dir(cr) - r) * alpha) + r); + p[Order::G] = m_gamma->inv(color_type::downscale((m_gamma->dir(cg) - g) * alpha) + g); + p[Order::B] = m_gamma->inv(color_type::downscale((m_gamma->dir(cb) - b) * alpha) + b); + } + + private: + const gamma_type* m_gamma; + }; + + + //==================================================pixfmt_alpha_blend_rgb + template + class pixfmt_alpha_blend_rgb + { + public: + typedef pixfmt_rgb_tag pixfmt_category; + typedef RenBuf rbuf_type; + typedef Blender 
blender_type; + typedef typename rbuf_type::row_data row_data; + typedef typename blender_type::color_type color_type; + typedef typename blender_type::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + enum + { + num_components = 3, + pix_step = Step, + pix_offset = Offset, + pix_width = sizeof(value_type) * pix_step + }; + struct pixel_type + { + value_type c[num_components]; + + void set(value_type r, value_type g, value_type b) + { + c[order_type::R] = r; + c[order_type::G] = g; + c[order_type::B] = b; + } + + void set(const color_type& color) + { + set(color.r, color.g, color.b); + } + + void get(value_type& r, value_type& g, value_type& b) const + { + r = c[order_type::R]; + g = c[order_type::G]; + b = c[order_type::B]; + } + + color_type get() const + { + return color_type( + c[order_type::R], + c[order_type::G], + c[order_type::B]); + } + + pixel_type* next() + { + return (pixel_type*)(c + pix_step); + } + + const pixel_type* next() const + { + return (const pixel_type*)(c + pix_step); + } + + pixel_type* advance(int n) + { + return (pixel_type*)(c + n * pix_step); + } + + const pixel_type* advance(int n) const + { + return (const pixel_type*)(c + n * pix_step); + } + }; + + private: + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(pixel_type* p, + value_type r, value_type g, value_type b, value_type a, + unsigned cover) + { + m_blender.blend_pix(p->c, r, g, b, a, cover); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(pixel_type* p, + value_type r, value_type g, value_type b, value_type a) + { + m_blender.blend_pix(p->c, r, g, b, a); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover) + { + m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(pixel_type* p, const color_type& c) + { + m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover) + { + if (!c.is_transparent()) + { + if (c.is_opaque() && cover == cover_mask) + { + p->set(c); + } + else + { + blend_pix(p, c, cover); + } + } + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c) + { + if (!c.is_transparent()) + { + if (c.is_opaque()) + { + p->set(c); + } + else + { + blend_pix(p, c); + } + } + } + + public: + //-------------------------------------------------------------------- + explicit pixfmt_alpha_blend_rgb(rbuf_type& rb) : + m_rbuf(&rb) + {} + void attach(rbuf_type& rb) { m_rbuf = &rb; } + + //-------------------------------------------------------------------- + template + bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2) + { + rect_i r(x1, y1, x2, y2); + if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1))) + { + int stride = pixf.stride(); + m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? 
r.y2 : r.y1), + (r.x2 - r.x1) + 1, + (r.y2 - r.y1) + 1, + stride); + return true; + } + return false; + } + + //-------------------------------------------------------------------- + Blender& blender() { return m_blender; } + + //-------------------------------------------------------------------- + AGG_INLINE unsigned width() const { return m_rbuf->width(); } + AGG_INLINE unsigned height() const { return m_rbuf->height(); } + AGG_INLINE int stride() const { return m_rbuf->stride(); } + + //-------------------------------------------------------------------- + AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); } + AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); } + AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); } + + //-------------------------------------------------------------------- + AGG_INLINE int8u* pix_ptr(int x, int y) + { + return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset); + } + + AGG_INLINE const int8u* pix_ptr(int x, int y) const + { + return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset); + } + + // Return pointer to pixel value, forcing row to be allocated. + AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len) + { + return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset)); + } + + // Return pointer to pixel value, or null if row not allocated. + AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const + { + int8u* p = m_rbuf->row_ptr(y); + return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0; + } + + // Get pixel pointer from raw buffer pointer. + AGG_INLINE static pixel_type* pix_value_ptr(void* p) + { + return (pixel_type*)((value_type*)p + pix_offset); + } + + // Get pixel pointer from raw buffer pointer. + AGG_INLINE static const pixel_type* pix_value_ptr(const void* p) + { + return (const pixel_type*)((const value_type*)p + pix_offset); + } + + //-------------------------------------------------------------------- + AGG_INLINE static void write_plain_color(void* p, color_type c) + { + // RGB formats are implicitly premultiplied. 
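+            // Only RGB is stored, so the alpha channel is dropped here; the
+            // premultiply composites the plain color over black before writing.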
+ c.premultiply(); + pix_value_ptr(p)->set(c); + } + + //-------------------------------------------------------------------- + AGG_INLINE static color_type read_plain_color(const void* p) + { + return pix_value_ptr(p)->get(); + } + + //-------------------------------------------------------------------- + AGG_INLINE static void make_pix(int8u* p, const color_type& c) + { + ((pixel_type*)p)->set(c); + } + + //-------------------------------------------------------------------- + AGG_INLINE color_type pixel(int x, int y) const + { + if (const pixel_type* p = pix_value_ptr(x, y)) + { + return p->get(); + } + return color_type::no_color(); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_pixel(int x, int y, const color_type& c) + { + pix_value_ptr(x, y, 1)->set(c); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover) + { + copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_hline(int x, int y, + unsigned len, + const color_type& c) + { + pixel_type* p = pix_value_ptr(x, y, len); + do + { + p->set(c); + p = p->next(); + } + while(--len); + } + + + //-------------------------------------------------------------------- + AGG_INLINE void copy_vline(int x, int y, + unsigned len, + const color_type& c) + { + do + { + pix_value_ptr(x, y++, 1)->set(c); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_hline(int x, int y, + unsigned len, + const color_type& c, + int8u cover) + { + if (!c.is_transparent()) + { + pixel_type* p = pix_value_ptr(x, y, len); + + if (c.is_opaque() && cover == cover_mask) + { + do + { + p->set(c); + p = p->next(); + } + while (--len); + } + else + { + do + { + blend_pix(p, c, cover); + p = p->next(); + } + while (--len); + } + } + } + + + //-------------------------------------------------------------------- + void blend_vline(int x, int y, + unsigned len, + const color_type& c, + int8u cover) + { + if (!c.is_transparent()) + { + if (c.is_opaque() && cover == cover_mask) + { + do + { + pix_value_ptr(x, y++, 1)->set(c); + } + while (--len); + } + else + { + do + { + blend_pix(pix_value_ptr(x, y++, 1), c, cover); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + void blend_solid_hspan(int x, int y, + unsigned len, + const color_type& c, + const int8u* covers) + { + if (!c.is_transparent()) + { + pixel_type* p = pix_value_ptr(x, y, len); + + do + { + if (c.is_opaque() && *covers == cover_mask) + { + p->set(c); + } + else + { + blend_pix(p, c, *covers); + } + p = p->next(); + ++covers; + } + while (--len); + } + } + + + //-------------------------------------------------------------------- + void blend_solid_vspan(int x, int y, + unsigned len, + const color_type& c, + const int8u* covers) + { + if (!c.is_transparent()) + { + do + { + pixel_type* p = pix_value_ptr(x, y++, 1); + + if (c.is_opaque() && *covers == cover_mask) + { + p->set(c); + } + else + { + blend_pix(p, c, *covers); + } + ++covers; + } + while (--len); + } + } + + //-------------------------------------------------------------------- + void copy_color_hspan(int x, int y, + unsigned len, + const color_type* colors) + { + pixel_type* p = pix_value_ptr(x, y, len); + + do + { + p->set(*colors++); + p = p->next(); + } + while (--len); + } + + + 
//-------------------------------------------------------------------- + void copy_color_vspan(int x, int y, + unsigned len, + const color_type* colors) + { + do + { + pix_value_ptr(x, y++, 1)->set(*colors++); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_color_hspan(int x, int y, + unsigned len, + const color_type* colors, + const int8u* covers, + int8u cover) + { + pixel_type* p = pix_value_ptr(x, y, len); + + if (covers) + { + do + { + copy_or_blend_pix(p, *colors++, *covers++); + p = p->next(); + } + while (--len); + } + else + { + if (cover == cover_mask) + { + do + { + copy_or_blend_pix(p, *colors++); + p = p->next(); + } + while (--len); + } + else + { + do + { + copy_or_blend_pix(p, *colors++, cover); + p = p->next(); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + void blend_color_vspan(int x, int y, + unsigned len, + const color_type* colors, + const int8u* covers, + int8u cover) + { + if (covers) + { + do + { + copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++); + } + while (--len); + } + else + { + if (cover == cover_mask) + { + do + { + copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++); + } + while (--len); + } + else + { + do + { + copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + template void for_each_pixel(Function f) + { + for (unsigned y = 0; y < height(); ++y) + { + row_data r = m_rbuf->row(y); + if (r.ptr) + { + unsigned len = r.x2 - r.x1 + 1; + pixel_type* p = pix_value_ptr(r.x1, y, len); + do + { + f(p->c); + p = p->next(); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + template void apply_gamma_dir(const GammaLut& g) + { + for_each_pixel(apply_gamma_dir_rgb(g)); + } + + //-------------------------------------------------------------------- + template void apply_gamma_inv(const GammaLut& g) + { + for_each_pixel(apply_gamma_inv_rgb(g)); + } + + //-------------------------------------------------------------------- + template + void copy_from(const RenBuf2& from, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len) + { + if (const int8u* p = from.row_ptr(ysrc)) + { + memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width, + p + xsrc * pix_width, + len * pix_width); + } + } + + //-------------------------------------------------------------------- + // Blend from an RGBA surface. 
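+        // Opaque source pixels are copied, fully transparent ones are skipped,
+        // and everything in between is alpha-blended into the destination.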
+        template<class SrcPixelFormatRenderer>
+        void blend_from(const SrcPixelFormatRenderer& from,
+                        int xdst, int ydst,
+                        int xsrc, int ysrc,
+                        unsigned len,
+                        int8u cover)
+        {
+            typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+            typedef typename SrcPixelFormatRenderer::order_type src_order;
+
+            if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+            {
+                pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+                if (cover == cover_mask)
+                {
+                    do
+                    {
+                        value_type alpha = psrc->c[src_order::A];
+                        if (alpha > color_type::empty_value())
+                        {
+                            if (alpha >= color_type::full_value())
+                            {
+                                pdst->c[order_type::R] = psrc->c[src_order::R];
+                                pdst->c[order_type::G] = psrc->c[src_order::G];
+                                pdst->c[order_type::B] = psrc->c[src_order::B];
+                            }
+                            else
+                            {
+                                blend_pix(pdst,
+                                    psrc->c[src_order::R],
+                                    psrc->c[src_order::G],
+                                    psrc->c[src_order::B],
+                                    alpha);
+                            }
+                        }
+                        psrc = psrc->next();
+                        pdst = pdst->next();
+                    }
+                    while(--len);
+                }
+                else
+                {
+                    do
+                    {
+                        copy_or_blend_pix(pdst, psrc->get(), cover);
+                        psrc = psrc->next();
+                        pdst = pdst->next();
+                    }
+                    while (--len);
+                }
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Blend from single color, using grayscale surface as alpha channel.
+        template<class SrcPixelFormatRenderer>
+        void blend_from_color(const SrcPixelFormatRenderer& from,
+                              const color_type& color,
+                              int xdst, int ydst,
+                              int xsrc, int ysrc,
+                              unsigned len,
+                              int8u cover)
+        {
+            typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+            typedef typename SrcPixelFormatRenderer::color_type src_color_type;
+
+            if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+            {
+                pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+                do
+                {
+                    copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
+                    psrc = psrc->next();
+                    pdst = pdst->next();
+                }
+                while (--len);
+            }
+        }
+
+        //--------------------------------------------------------------------
+        // Blend from color table, using grayscale surface as indexes into table.
+        // Obviously, this only works for integer value types.
+ template + void blend_from_lut(const SrcPixelFormatRenderer& from, + const color_type* color_lut, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len, + int8u cover) + { + typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type; + + if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc)) + { + pixel_type* pdst = pix_value_ptr(xdst, ydst, len); + + if (cover == cover_mask) + { + do + { + const color_type& color = color_lut[psrc->c[0]]; + blend_pix(pdst, color); + psrc = psrc->next(); + pdst = pdst->next(); + } + while(--len); + } + else + { + do + { + copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover); + psrc = psrc->next(); + pdst = pdst->next(); + } + while(--len); + } + } + } + + private: + rbuf_type* m_rbuf; + Blender m_blender; + }; + + //----------------------------------------------------------------------- + typedef blender_rgb blender_rgb24; + typedef blender_rgb blender_bgr24; + typedef blender_rgb blender_srgb24; + typedef blender_rgb blender_sbgr24; + typedef blender_rgb blender_rgb48; + typedef blender_rgb blender_bgr48; + typedef blender_rgb blender_rgb96; + typedef blender_rgb blender_bgr96; + + typedef blender_rgb_pre blender_rgb24_pre; + typedef blender_rgb_pre blender_bgr24_pre; + typedef blender_rgb_pre blender_srgb24_pre; + typedef blender_rgb_pre blender_sbgr24_pre; + typedef blender_rgb_pre blender_rgb48_pre; + typedef blender_rgb_pre blender_bgr48_pre; + typedef blender_rgb_pre blender_rgb96_pre; + typedef blender_rgb_pre blender_bgr96_pre; + + typedef pixfmt_alpha_blend_rgb pixfmt_rgb24; + typedef pixfmt_alpha_blend_rgb pixfmt_bgr24; + typedef pixfmt_alpha_blend_rgb pixfmt_srgb24; + typedef pixfmt_alpha_blend_rgb pixfmt_sbgr24; + typedef pixfmt_alpha_blend_rgb pixfmt_rgb48; + typedef pixfmt_alpha_blend_rgb pixfmt_bgr48; + typedef pixfmt_alpha_blend_rgb pixfmt_rgb96; + typedef pixfmt_alpha_blend_rgb pixfmt_bgr96; + + typedef pixfmt_alpha_blend_rgb pixfmt_rgb24_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_bgr24_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_srgb24_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_sbgr24_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_rgb48_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_bgr48_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_rgb96_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_bgr96_pre; + + typedef pixfmt_alpha_blend_rgb pixfmt_rgbx32; + typedef pixfmt_alpha_blend_rgb pixfmt_xrgb32; + typedef pixfmt_alpha_blend_rgb pixfmt_xbgr32; + typedef pixfmt_alpha_blend_rgb pixfmt_bgrx32; + typedef pixfmt_alpha_blend_rgb pixfmt_srgbx32; + typedef pixfmt_alpha_blend_rgb pixfmt_sxrgb32; + typedef pixfmt_alpha_blend_rgb pixfmt_sxbgr32; + typedef pixfmt_alpha_blend_rgb pixfmt_sbgrx32; + typedef pixfmt_alpha_blend_rgb pixfmt_rgbx64; + typedef pixfmt_alpha_blend_rgb pixfmt_xrgb64; + typedef pixfmt_alpha_blend_rgb pixfmt_xbgr64; + typedef pixfmt_alpha_blend_rgb pixfmt_bgrx64; + typedef pixfmt_alpha_blend_rgb pixfmt_rgbx128; + typedef pixfmt_alpha_blend_rgb pixfmt_xrgb128; + typedef pixfmt_alpha_blend_rgb pixfmt_xbgr128; + typedef pixfmt_alpha_blend_rgb pixfmt_bgrx128; + + typedef pixfmt_alpha_blend_rgb pixfmt_rgbx32_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_xrgb32_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_xbgr32_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_bgrx32_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_srgbx32_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_sxrgb32_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_sxbgr32_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_sbgrx32_pre; + typedef pixfmt_alpha_blend_rgb 
pixfmt_rgbx64_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_xrgb64_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_xbgr64_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_bgrx64_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_rgbx128_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_xrgb128_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_xbgr128_pre; + typedef pixfmt_alpha_blend_rgb pixfmt_bgrx128_pre; + + + //-----------------------------------------------------pixfmt_rgb24_gamma + template class pixfmt_rgb24_gamma : + public pixfmt_alpha_blend_rgb, rendering_buffer, 3> + { + public: + pixfmt_rgb24_gamma(rendering_buffer& rb, const Gamma& g) : + pixfmt_alpha_blend_rgb, rendering_buffer, 3>(rb) + { + this->blender().gamma(g); + } + }; + + //-----------------------------------------------------pixfmt_srgb24_gamma + template class pixfmt_srgb24_gamma : + public pixfmt_alpha_blend_rgb, rendering_buffer, 3> + { + public: + pixfmt_srgb24_gamma(rendering_buffer& rb, const Gamma& g) : + pixfmt_alpha_blend_rgb, rendering_buffer, 3>(rb) + { + this->blender().gamma(g); + } + }; + + //-----------------------------------------------------pixfmt_bgr24_gamma + template class pixfmt_bgr24_gamma : + public pixfmt_alpha_blend_rgb, rendering_buffer, 3> + { + public: + pixfmt_bgr24_gamma(rendering_buffer& rb, const Gamma& g) : + pixfmt_alpha_blend_rgb, rendering_buffer, 3>(rb) + { + this->blender().gamma(g); + } + }; + + //-----------------------------------------------------pixfmt_sbgr24_gamma + template class pixfmt_sbgr24_gamma : + public pixfmt_alpha_blend_rgb, rendering_buffer, 3> + { + public: + pixfmt_sbgr24_gamma(rendering_buffer& rb, const Gamma& g) : + pixfmt_alpha_blend_rgb, rendering_buffer, 3>(rb) + { + this->blender().gamma(g); + } + }; + + //-----------------------------------------------------pixfmt_rgb48_gamma + template class pixfmt_rgb48_gamma : + public pixfmt_alpha_blend_rgb, rendering_buffer, 3> + { + public: + pixfmt_rgb48_gamma(rendering_buffer& rb, const Gamma& g) : + pixfmt_alpha_blend_rgb, rendering_buffer, 3>(rb) + { + this->blender().gamma(g); + } + }; + + //-----------------------------------------------------pixfmt_bgr48_gamma + template class pixfmt_bgr48_gamma : + public pixfmt_alpha_blend_rgb, rendering_buffer, 3> + { + public: + pixfmt_bgr48_gamma(rendering_buffer& rb, const Gamma& g) : + pixfmt_alpha_blend_rgb, rendering_buffer, 3>(rb) + { + this->blender().gamma(g); + } + }; + +} + +#endif + diff --git a/kiva/markers/agg/agg_pixfmt_rgba.h b/kiva/markers/agg/agg_pixfmt_rgba.h new file mode 100644 index 000000000..f99762c55 --- /dev/null +++ b/kiva/markers/agg/agg_pixfmt_rgba.h @@ -0,0 +1,2803 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// Adaptation for high precision colors has been sponsored by +// Liberty Technology Systems, Inc., visit http://lib-sys.com +// +// Liberty Technology Systems, Inc. 
is the provider of +// PostScript and PDF technology for software developers. +// +//---------------------------------------------------------------------------- + +#ifndef AGG_PIXFMT_RGBA_INCLUDED +#define AGG_PIXFMT_RGBA_INCLUDED + +#include +#include +#include "agg_pixfmt_base.h" +#include "agg_rendering_buffer.h" + +namespace agg24markers +{ + template inline T sd_min(T a, T b) { return (a < b) ? a : b; } + template inline T sd_max(T a, T b) { return (a > b) ? a : b; } + + inline rgba & clip(rgba & c) + { + if (c.a > 1) c.a = 1; else if (c.a < 0) c.a = 0; + if (c.r > c.a) c.r = c.a; else if (c.r < 0) c.r = 0; + if (c.g > c.a) c.g = c.a; else if (c.g < 0) c.g = 0; + if (c.b > c.a) c.b = c.a; else if (c.b < 0) c.b = 0; + return c; + } + + //=========================================================multiplier_rgba + template + struct multiplier_rgba + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + + //-------------------------------------------------------------------- + static AGG_INLINE void premultiply(value_type* p) + { + value_type a = p[Order::A]; + p[Order::R] = color_type::multiply(p[Order::R], a); + p[Order::G] = color_type::multiply(p[Order::G], a); + p[Order::B] = color_type::multiply(p[Order::B], a); + } + + + //-------------------------------------------------------------------- + static AGG_INLINE void demultiply(value_type* p) + { + value_type a = p[Order::A]; + p[Order::R] = color_type::demultiply(p[Order::R], a); + p[Order::G] = color_type::demultiply(p[Order::G], a); + p[Order::B] = color_type::demultiply(p[Order::B], a); + } + }; + + //=====================================================apply_gamma_dir_rgba + template + class apply_gamma_dir_rgba + { + public: + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + + apply_gamma_dir_rgba(const GammaLut& gamma) : m_gamma(gamma) {} + + AGG_INLINE void operator () (value_type* p) + { + p[Order::R] = m_gamma.dir(p[Order::R]); + p[Order::G] = m_gamma.dir(p[Order::G]); + p[Order::B] = m_gamma.dir(p[Order::B]); + } + + private: + const GammaLut& m_gamma; + }; + + //=====================================================apply_gamma_inv_rgba + template class apply_gamma_inv_rgba + { + public: + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + + apply_gamma_inv_rgba(const GammaLut& gamma) : m_gamma(gamma) {} + + AGG_INLINE void operator () (value_type* p) + { + p[Order::R] = m_gamma.inv(p[Order::R]); + p[Order::G] = m_gamma.inv(p[Order::G]); + p[Order::B] = m_gamma.inv(p[Order::B]); + } + + private: + const GammaLut& m_gamma; + }; + + + template + struct conv_rgba_pre + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + + //-------------------------------------------------------------------- + static AGG_INLINE void set_plain_color(value_type* p, color_type c) + { + c.premultiply(); + p[Order::R] = c.r; + p[Order::G] = c.g; + p[Order::B] = c.b; + p[Order::A] = c.a; + } + + //-------------------------------------------------------------------- + static AGG_INLINE color_type get_plain_color(const value_type* p) + { + return color_type( + p[Order::R], + p[Order::G], + p[Order::B], + p[Order::A]).demultiply(); + } + }; + + template + struct conv_rgba_plain + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + + //-------------------------------------------------------------------- + static AGG_INLINE void 
set_plain_color(value_type* p, color_type c) + { + p[Order::R] = c.r; + p[Order::G] = c.g; + p[Order::B] = c.b; + p[Order::A] = c.a; + } + + //-------------------------------------------------------------------- + static AGG_INLINE color_type get_plain_color(const value_type* p) + { + return color_type( + p[Order::R], + p[Order::G], + p[Order::B], + p[Order::A]); + } + }; + + //=============================================================blender_rgba + // Blends "plain" (i.e. non-premultiplied) colors into a premultiplied buffer. + template + struct blender_rgba : conv_rgba_pre + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's + // compositing function. Since the render buffer is in fact premultiplied + // we omit the initial premultiplication and final demultiplication. + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover) + { + blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha) + { + p[Order::R] = color_type::lerp(p[Order::R], cr, alpha); + p[Order::G] = color_type::lerp(p[Order::G], cg, alpha); + p[Order::B] = color_type::lerp(p[Order::B], cb, alpha); + p[Order::A] = color_type::prelerp(p[Order::A], alpha, alpha); + } + }; + + + //========================================================blender_rgba_pre + // Blends premultiplied colors into a premultiplied buffer. + template + struct blender_rgba_pre : conv_rgba_pre + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + // Blend pixels using the premultiplied form of Alvy-Ray Smith's + // compositing function. + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover) + { + blend_pix(p, + color_type::mult_cover(cr, cover), + color_type::mult_cover(cg, cover), + color_type::mult_cover(cb, cover), + color_type::mult_cover(alpha, cover)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha) + { + p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha); + p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha); + p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha); + p[Order::A] = color_type::prelerp(p[Order::A], alpha, alpha); + } + }; + + //======================================================blender_rgba_plain + // Blends "plain" (non-premultiplied) colors into a plain (non-premultiplied) buffer. 
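The three blenders in this header differ only in where premultiplication happens: blender_rgba lerps a plain (straight-alpha) source into a premultiplied destination, blender_rgba_pre assumes both sides are already premultiplied and uses prelerp throughout, and blender_rgba_plain (below) premultiplies the destination on the fly, blends, then demultiplies again. A minimal standalone 8-bit sketch of the two core forms, with simplified rounding and illustrative names (not part of the vendored AGG code):

    #include <cstdint>

    // blender_rgba: plain source channel sc over premultiplied dest dc
    //   dc' = dc + (sc - dc) * a          ("lerp")
    static inline std::uint8_t lerp8(std::uint8_t dc, std::uint8_t sc, std::uint8_t a)
    {
        return static_cast<std::uint8_t>(dc + ((sc - dc) * a) / 255);
    }

    // blender_rgba_pre: premultiplied source channel over premultiplied dest
    //   dc' = dc + sc - dc * a            ("prelerp")
    static inline std::uint8_t prelerp8(std::uint8_t dc, std::uint8_t sc, std::uint8_t a)
    {
        return static_cast<std::uint8_t>(dc + sc - (dc * a) / 255);
    }

    // In both cases the destination alpha is updated with the prelerp form:
    //   da' = da + a - da * a

Either way the destination buffer stays premultiplied, which is why comp_op_rgba_src_over further down can simply delegate to blender_rgba_pre.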
+ template + struct blender_rgba_plain : conv_rgba_plain + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's + // compositing function. + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover) + { + blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover)); + } + + //-------------------------------------------------------------------- + static AGG_INLINE void blend_pix(value_type* p, + value_type cr, value_type cg, value_type cb, value_type alpha) + { + if (alpha > color_type::empty_value()) + { + calc_type a = p[Order::A]; + calc_type r = color_type::multiply(p[Order::R], a); + calc_type g = color_type::multiply(p[Order::G], a); + calc_type b = color_type::multiply(p[Order::B], a); + p[Order::R] = color_type::lerp(r, cr, alpha); + p[Order::G] = color_type::lerp(g, cg, alpha); + p[Order::B] = color_type::lerp(b, cb, alpha); + p[Order::A] = color_type::prelerp(a, alpha, alpha); + multiplier_rgba::demultiply(p); + } + } + }; + + // SVG compositing operations. + // For specifications, see http://www.w3.org/TR/SVGCompositing/ + + //=========================================================comp_op_rgba_clear + template + struct comp_op_rgba_clear : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = 0 + // Da' = 0 + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + if (cover >= cover_full) + { + p[0] = p[1] = p[2] = p[3] = color_type::empty_value(); + } + else if (cover > cover_none) + { + set(p, get(p, cover_full - cover)); + } + } + }; + + //===========================================================comp_op_rgba_src + template + struct comp_op_rgba_src : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca + // Da' = Sa + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + if (cover >= cover_full) + { + set(p, r, g, b, a); + } + else + { + rgba s = get(r, g, b, a, cover); + rgba d = get(p, cover_full - cover); + d.r += s.r; + d.g += s.g; + d.b += s.b; + d.a += s.a; + set(p, d); + } + } + }; + + //===========================================================comp_op_rgba_dst + template + struct comp_op_rgba_dst : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + + // Dca' = Dca.Sa + Dca.(1 - Sa) = Dca + // Da' = Da.Sa + Da.(1 - Sa) = Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + // Well, that was easy! 
+ } + }; + + //======================================================comp_op_rgba_src_over + template + struct comp_op_rgba_src_over : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca + Dca.(1 - Sa) = Dca + Sca - Dca.Sa + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { +#if 1 + blender_rgba_pre::blend_pix(p, r, g, b, a, cover); +#else + rgba s = get(r, g, b, a, cover); + rgba d = get(p); + d.r += s.r - d.r * s.a; + d.g += s.g - d.g * s.a; + d.b += s.b - d.b * s.a; + d.a += s.a - d.a * s.a; + set(p, d); +#endif + } + }; + + //======================================================comp_op_rgba_dst_over + template + struct comp_op_rgba_dst_over : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Dca + Sca.(1 - Da) + // Da' = Sa + Da - Sa.Da = Da + Sa.(1 - Da) + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + rgba d = get(p); + double d1a = 1 - d.a; + d.r += s.r * d1a; + d.g += s.g * d1a; + d.b += s.b * d1a; + d.a += s.a * d1a; + set(p, d); + } + }; + + //======================================================comp_op_rgba_src_in + template + struct comp_op_rgba_src_in : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca.Da + // Da' = Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + double da = ColorT::to_double(p[Order::A]); + if (da > 0) + { + rgba s = get(r, g, b, a, cover); + rgba d = get(p, cover_full - cover); + d.r += s.r * da; + d.g += s.g * da; + d.b += s.b * da; + d.a += s.a * da; + set(p, d); + } + } + }; + + //======================================================comp_op_rgba_dst_in + template + struct comp_op_rgba_dst_in : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Dca.Sa + // Da' = Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + double sa = ColorT::to_double(a); + rgba d = get(p, cover_full - cover); + rgba d2 = get(p, cover); + d.r += d2.r * sa; + d.g += d2.g * sa; + d.b += d2.b * sa; + d.a += d2.a * sa; + set(p, d); + } + }; + + //======================================================comp_op_rgba_src_out + template + struct comp_op_rgba_src_out : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca.(1 - Da) + // Da' = Sa.(1 - Da) + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + rgba d = get(p, cover_full - cover); + double d1a = 1 - ColorT::to_double(p[Order::A]); + d.r += s.r * d1a; + d.g += s.g * d1a; + d.b += s.b * d1a; + d.a += s.a * d1a; + set(p, d); + } + }; + + //======================================================comp_op_rgba_dst_out + template + struct 
comp_op_rgba_dst_out : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Dca.(1 - Sa) + // Da' = Da.(1 - Sa) + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba d = get(p, cover_full - cover); + rgba dc = get(p, cover); + double s1a = 1 - ColorT::to_double(a); + d.r += dc.r * s1a; + d.g += dc.g * s1a; + d.b += dc.b * s1a; + d.a += dc.a * s1a; + set(p, d); + } + }; + + //=====================================================comp_op_rgba_src_atop + template + struct comp_op_rgba_src_atop : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca.Da + Dca.(1 - Sa) + // Da' = Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + rgba d = get(p); + double s1a = 1 - s.a; + d.r = s.r * d.a + d.r * s1a; + d.g = s.g * d.a + d.g * s1a; + d.b = s.b * d.a + d.g * s1a; + set(p, d); + } + }; + + //=====================================================comp_op_rgba_dst_atop + template + struct comp_op_rgba_dst_atop : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Dca.Sa + Sca.(1 - Da) + // Da' = Sa + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba sc = get(r, g, b, a, cover); + rgba dc = get(p, cover); + rgba d = get(p, cover_full - cover); + double sa = ColorT::to_double(a); + double d1a = 1 - ColorT::to_double(p[Order::A]); + d.r += dc.r * sa + sc.r * d1a; + d.g += dc.g * sa + sc.g * d1a; + d.b += dc.b * sa + sc.b * d1a; + d.a += sc.a; + set(p, d); + } + }; + + //=========================================================comp_op_rgba_xor + template + struct comp_op_rgba_xor : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca.(1 - Da) + Dca.(1 - Sa) + // Da' = Sa + Da - 2.Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + rgba d = get(p); + double s1a = 1 - s.a; + double d1a = 1 - ColorT::to_double(p[Order::A]); + d.r = s.r * d1a + d.r * s1a; + d.g = s.g * d1a + d.g * s1a; + d.b = s.b * d1a + d.b * s1a; + d.a = s.a + d.a - 2 * s.a * d.a; + set(p, d); + } + }; + + //=========================================================comp_op_rgba_plus + template + struct comp_op_rgba_plus : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca + Dca + // Da' = Sa + Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + d.a = sd_min(d.a + s.a, 1.0); + d.r = sd_min(d.r + s.r, d.a); + d.g = sd_min(d.g + s.g, d.a); + d.b = sd_min(d.b + s.b, d.a); + set(p, clip(d)); + } + } + }; + + //========================================================comp_op_rgba_minus + // Note: not included in SVG 
spec. + template + struct comp_op_rgba_minus : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Dca - Sca + // Da' = 1 - (1 - Sa).(1 - Da) = Da + Sa - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + d.a += s.a - s.a * d.a; + d.r = sd_max(d.r - s.r, 0.0); + d.g = sd_max(d.g - s.g, 0.0); + d.b = sd_max(d.b - s.b, 0.0); + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_multiply + template + struct comp_op_rgba_multiply : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa) + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + double s1a = 1 - s.a; + double d1a = 1 - d.a; + d.r = s.r * d.r + s.r * d1a + d.r * s1a; + d.g = s.g * d.g + s.g * d1a + d.g * s1a; + d.b = s.b * d.b + s.b * d1a + d.b * s1a; + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_screen + template + struct comp_op_rgba_screen : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca + Dca - Sca.Dca + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + d.r += s.r - s.r * d.r; + d.g += s.g - s.g * d.g; + d.b += s.b - s.b * d.b; + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_overlay + template + struct comp_op_rgba_overlay : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // if 2.Dca <= Da + // Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa) + // otherwise + // Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa) + // + // Da' = Sa + Da - Sa.Da + static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a) + { + return (2 * dca <= da) ? 
+ 2 * sca * dca + sca * d1a + dca * s1a : + sada - 2 * (da - dca) * (sa - sca) + sca * d1a + dca * s1a; + } + + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + double d1a = 1 - d.a; + double s1a = 1 - s.a; + double sada = s.a * d.a; + d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a); + d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a); + d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a); + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_darken + template + struct comp_op_rgba_darken : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = min(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa) + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + double d1a = 1 - d.a; + double s1a = 1 - s.a; + d.r = sd_min(s.r * d.a, d.r * s.a) + s.r * d1a + d.r * s1a; + d.g = sd_min(s.g * d.a, d.g * s.a) + s.g * d1a + d.g * s1a; + d.b = sd_min(s.b * d.a, d.b * s.a) + s.b * d1a + d.b * s1a; + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_lighten + template + struct comp_op_rgba_lighten : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = max(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa) + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + double d1a = 1 - d.a; + double s1a = 1 - s.a; + d.r = sd_max(s.r * d.a, d.r * s.a) + s.r * d1a + d.r * s1a; + d.g = sd_max(s.g * d.a, d.g * s.a) + s.g * d1a + d.g * s1a; + d.b = sd_max(s.b * d.a, d.b * s.a) + s.b * d1a + d.b * s1a; + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_color_dodge + template + struct comp_op_rgba_color_dodge : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // if Sca == Sa and Dca == 0 + // Dca' = Sca.(1 - Da) + Dca.(1 - Sa) = Sca.(1 - Da) + // otherwise if Sca == Sa + // Dca' = Sa.Da + Sca.(1 - Da) + Dca.(1 - Sa) + // otherwise if Sca < Sa + // Dca' = Sa.Da.min(1, Dca/Da.Sa/(Sa - Sca)) + Sca.(1 - Da) + Dca.(1 - Sa) + // + // Da' = Sa + Da - Sa.Da + static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a) + { + if (sca < sa) return sada * sd_min(1.0, (dca / da) * sa / (sa - sca)) + sca * d1a + dca * s1a; + if (dca > 0) return sada + sca * d1a + dca * s1a; + return sca * d1a; + } + + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + if (d.a > 0) + { + double sada = s.a * d.a; + double s1a = 1 - s.a; + double d1a = 1 - d.a; + d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a); + d.g = 
calc(d.g, s.g, d.a, s.a, sada, d1a, s1a); + d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a); + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + else set(p, s); + } + } + }; + + //=====================================================comp_op_rgba_color_burn + template + struct comp_op_rgba_color_burn : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // if Sca == 0 and Dca == Da + // Dca' = Sa.Da + Dca.(1 - Sa) + // otherwise if Sca == 0 + // Dca' = Dca.(1 - Sa) + // otherwise if Sca > 0 + // Dca' = Sa.Da.(1 - min(1, (1 - Dca/Da).Sa/Sca)) + Sca.(1 - Da) + Dca.(1 - Sa) + static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a) + { + if (sca > 0) return sada * (1 - sd_min(1.0, (1 - dca / da) * sa / sca)) + sca * d1a + dca * s1a; + if (dca > da) return sada + dca * s1a; + return dca * s1a; + } + + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + if (d.a > 0) + { + double sada = s.a * d.a; + double s1a = 1 - s.a; + double d1a = 1 - d.a; + d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a); + d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a); + d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a); + d.a += s.a - sada; + set(p, clip(d)); + } + else set(p, s); + } + } + }; + + //=====================================================comp_op_rgba_hard_light + template + struct comp_op_rgba_hard_light : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // if 2.Sca < Sa + // Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa) + // otherwise + // Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa) + // + // Da' = Sa + Da - Sa.Da + static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a) + { + return (2 * sca < sa) ? 
+ 2 * sca * dca + sca * d1a + dca * s1a : + sada - 2 * (da - dca) * (sa - sca) + sca * d1a + dca * s1a; + } + + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + double d1a = 1 - d.a; + double s1a = 1 - s.a; + double sada = s.a * d.a; + d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a); + d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a); + d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a); + d.a += s.a - sada; + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_soft_light + template + struct comp_op_rgba_soft_light : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // if 2.Sca <= Sa + // Dca' = Dca.Sa - (Sa.Da - 2.Sca.Da).Dca.Sa.(Sa.Da - Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa) + // otherwise if 2.Sca > Sa and 4.Dca <= Da + // Dca' = Dca.Sa + (2.Sca.Da - Sa.Da).((((16.Dsa.Sa - 12).Dsa.Sa + 4).Dsa.Da) - Dsa.Da) + Sca.(1 - Da) + Dca.(1 - Sa) + // otherwise if 2.Sca > Sa and 4.Dca > Da + // Dca' = Dca.Sa + (2.Sca.Da - Sa.Da).((Dca.Sa)^0.5 - Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa) + // + // Da' = Sa + Da - Sa.Da + static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a) + { + double dcasa = dca * sa; + if (2 * sca <= sa) return dcasa - (sada - 2 * sca * da) * dcasa * (sada - dcasa) + sca * d1a + dca * s1a; + if (4 * dca <= da) return dcasa + (2 * sca * da - sada) * ((((16 * dcasa - 12) * dcasa + 4) * dca * da) - dca * da) + sca * d1a + dca * s1a; + return dcasa + (2 * sca * da - sada) * (sqrt(dcasa) - dcasa) + sca * d1a + dca * s1a; + } + + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + if (d.a > 0) + { + double sada = s.a * d.a; + double s1a = 1 - s.a; + double d1a = 1 - d.a; + d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a); + d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a); + d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a); + d.a += s.a - sada; + set(p, clip(d)); + } + else set(p, s); + } + } + }; + + //=====================================================comp_op_rgba_difference + template + struct comp_op_rgba_difference : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = Sca + Dca - 2.min(Sca.Da, Dca.Sa) + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + d.r += s.r - 2 * sd_min(s.r * d.a, d.r * s.a); + d.g += s.g - 2 * sd_min(s.g * d.a, d.g * s.a); + d.b += s.b - 2 * sd_min(s.b * d.a, d.b * s.a); + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + } + }; + + //=====================================================comp_op_rgba_exclusion + template + struct comp_op_rgba_exclusion : blender_base + { + typedef ColorT color_type; + typedef typename color_type::value_type value_type; + using blender_base::get; + using blender_base::set; + + // Dca' = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa) + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + value_type r, 
value_type g, value_type b, value_type a, cover_type cover) + { + rgba s = get(r, g, b, a, cover); + if (s.a > 0) + { + rgba d = get(p); + double d1a = 1 - d.a; + double s1a = 1 - s.a; + d.r = (s.r * d.a + d.r * s.a - 2 * s.r * d.r) + s.r * d1a + d.r * s1a; + d.g = (s.g * d.a + d.g * s.a - 2 * s.g * d.g) + s.g * d1a + d.g * s1a; + d.b = (s.b * d.a + d.b * s.a - 2 * s.b * d.b) + s.b * d1a + d.b * s1a; + d.a += s.a - s.a * d.a; + set(p, clip(d)); + } + } + }; + +#if 0 + //=====================================================comp_op_rgba_contrast + template struct comp_op_rgba_contrast + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + enum base_scale_e + { + base_shift = color_type::base_shift, + base_mask = color_type::base_mask + }; + + + static AGG_INLINE void blend_pix(value_type* p, + unsigned sr, unsigned sg, unsigned sb, + unsigned sa, unsigned cover) + { + if (cover < 255) + { + sr = (sr * cover + 255) >> 8; + sg = (sg * cover + 255) >> 8; + sb = (sb * cover + 255) >> 8; + sa = (sa * cover + 255) >> 8; + } + long_type dr = p[Order::R]; + long_type dg = p[Order::G]; + long_type db = p[Order::B]; + int da = p[Order::A]; + long_type d2a = da >> 1; + unsigned s2a = sa >> 1; + + int r = (int)((((dr - d2a) * int((sr - s2a)*2 + base_mask)) >> base_shift) + d2a); + int g = (int)((((dg - d2a) * int((sg - s2a)*2 + base_mask)) >> base_shift) + d2a); + int b = (int)((((db - d2a) * int((sb - s2a)*2 + base_mask)) >> base_shift) + d2a); + + r = (r < 0) ? 0 : r; + g = (g < 0) ? 0 : g; + b = (b < 0) ? 0 : b; + + p[Order::R] = (value_type)((r > da) ? da : r); + p[Order::G] = (value_type)((g > da) ? da : g); + p[Order::B] = (value_type)((b > da) ? 
da : b); + } + }; + + //=====================================================comp_op_rgba_invert + template struct comp_op_rgba_invert + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + enum base_scale_e + { + base_shift = color_type::base_shift, + base_mask = color_type::base_mask + }; + + // Dca' = (Da - Dca) * Sa + Dca.(1 - Sa) + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + unsigned sr, unsigned sg, unsigned sb, + unsigned sa, unsigned cover) + { + sa = (sa * cover + 255) >> 8; + if (sa) + { + calc_type da = p[Order::A]; + calc_type dr = ((da - p[Order::R]) * sa + base_mask) >> base_shift; + calc_type dg = ((da - p[Order::G]) * sa + base_mask) >> base_shift; + calc_type db = ((da - p[Order::B]) * sa + base_mask) >> base_shift; + calc_type s1a = base_mask - sa; + p[Order::R] = (value_type)(dr + ((p[Order::R] * s1a + base_mask) >> base_shift)); + p[Order::G] = (value_type)(dg + ((p[Order::G] * s1a + base_mask) >> base_shift)); + p[Order::B] = (value_type)(db + ((p[Order::B] * s1a + base_mask) >> base_shift)); + p[Order::A] = (value_type)(sa + da - ((sa * da + base_mask) >> base_shift)); + } + } + }; + + //=================================================comp_op_rgba_invert_rgb + template struct comp_op_rgba_invert_rgb + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + enum base_scale_e + { + base_shift = color_type::base_shift, + base_mask = color_type::base_mask + }; + + // Dca' = (Da - Dca) * Sca + Dca.(1 - Sa) + // Da' = Sa + Da - Sa.Da + static AGG_INLINE void blend_pix(value_type* p, + unsigned sr, unsigned sg, unsigned sb, + unsigned sa, unsigned cover) + { + if (cover < 255) + { + sr = (sr * cover + 255) >> 8; + sg = (sg * cover + 255) >> 8; + sb = (sb * cover + 255) >> 8; + sa = (sa * cover + 255) >> 8; + } + if (sa) + { + calc_type da = p[Order::A]; + calc_type dr = ((da - p[Order::R]) * sr + base_mask) >> base_shift; + calc_type dg = ((da - p[Order::G]) * sg + base_mask) >> base_shift; + calc_type db = ((da - p[Order::B]) * sb + base_mask) >> base_shift; + calc_type s1a = base_mask - sa; + p[Order::R] = (value_type)(dr + ((p[Order::R] * s1a + base_mask) >> base_shift)); + p[Order::G] = (value_type)(dg + ((p[Order::G] * s1a + base_mask) >> base_shift)); + p[Order::B] = (value_type)(db + ((p[Order::B] * s1a + base_mask) >> base_shift)); + p[Order::A] = (value_type)(sa + da - ((sa * da + base_mask) >> base_shift)); + } + } + }; +#endif + + + //======================================================comp_op_table_rgba + template struct comp_op_table_rgba + { + typedef typename ColorT::value_type value_type; + typedef typename ColorT::calc_type calc_type; + typedef void (*comp_op_func_type)(value_type* p, + value_type cr, + value_type cg, + value_type cb, + value_type ca, + cover_type cover); + static comp_op_func_type g_comp_op_func[]; + }; + + //==========================================================g_comp_op_func + template + typename comp_op_table_rgba::comp_op_func_type + comp_op_table_rgba::g_comp_op_func[] = + { + comp_op_rgba_clear ::blend_pix, + comp_op_rgba_src ::blend_pix, + comp_op_rgba_dst ::blend_pix, + comp_op_rgba_src_over ::blend_pix, + comp_op_rgba_dst_over ::blend_pix, + comp_op_rgba_src_in 
::blend_pix, + comp_op_rgba_dst_in ::blend_pix, + comp_op_rgba_src_out ::blend_pix, + comp_op_rgba_dst_out ::blend_pix, + comp_op_rgba_src_atop ::blend_pix, + comp_op_rgba_dst_atop ::blend_pix, + comp_op_rgba_xor ::blend_pix, + comp_op_rgba_plus ::blend_pix, + //comp_op_rgba_minus ::blend_pix, + comp_op_rgba_multiply ::blend_pix, + comp_op_rgba_screen ::blend_pix, + comp_op_rgba_overlay ::blend_pix, + comp_op_rgba_darken ::blend_pix, + comp_op_rgba_lighten ::blend_pix, + comp_op_rgba_color_dodge::blend_pix, + comp_op_rgba_color_burn ::blend_pix, + comp_op_rgba_hard_light ::blend_pix, + comp_op_rgba_soft_light ::blend_pix, + comp_op_rgba_difference ::blend_pix, + comp_op_rgba_exclusion ::blend_pix, + //comp_op_rgba_contrast ::blend_pix, + //comp_op_rgba_invert ::blend_pix, + //comp_op_rgba_invert_rgb ::blend_pix, + 0 + }; + + + //==============================================================comp_op_e + enum comp_op_e + { + comp_op_clear, //----comp_op_clear + comp_op_src, //----comp_op_src + comp_op_dst, //----comp_op_dst + comp_op_src_over, //----comp_op_src_over + comp_op_dst_over, //----comp_op_dst_over + comp_op_src_in, //----comp_op_src_in + comp_op_dst_in, //----comp_op_dst_in + comp_op_src_out, //----comp_op_src_out + comp_op_dst_out, //----comp_op_dst_out + comp_op_src_atop, //----comp_op_src_atop + comp_op_dst_atop, //----comp_op_dst_atop + comp_op_xor, //----comp_op_xor + comp_op_plus, //----comp_op_plus + //comp_op_minus, //----comp_op_minus + comp_op_multiply, //----comp_op_multiply + comp_op_screen, //----comp_op_screen + comp_op_overlay, //----comp_op_overlay + comp_op_darken, //----comp_op_darken + comp_op_lighten, //----comp_op_lighten + comp_op_color_dodge, //----comp_op_color_dodge + comp_op_color_burn, //----comp_op_color_burn + comp_op_hard_light, //----comp_op_hard_light + comp_op_soft_light, //----comp_op_soft_light + comp_op_difference, //----comp_op_difference + comp_op_exclusion, //----comp_op_exclusion + //comp_op_contrast, //----comp_op_contrast + //comp_op_invert, //----comp_op_invert + //comp_op_invert_rgb, //----comp_op_invert_rgb + + end_of_comp_op_e + }; + + + + + + + + //====================================================comp_op_adaptor_rgba + template + struct comp_op_adaptor_rgba + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + comp_op_table_rgba::g_comp_op_func[op](p, + color_type::multiply(r, a), + color_type::multiply(g, a), + color_type::multiply(b, a), + a, cover); + } + }; + + //=========================================comp_op_adaptor_clip_to_dst_rgba + template + struct comp_op_adaptor_clip_to_dst_rgba + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + r = color_type::multiply(r, a); + g = color_type::multiply(g, a); + b = color_type::multiply(b, a); + value_type da = p[Order::A]; + comp_op_table_rgba::g_comp_op_func[op](p, + color_type::multiply(r, da), + color_type::multiply(g, da), + color_type::multiply(b, da), + 
color_type::multiply(a, da), cover); + } + }; + + //================================================comp_op_adaptor_rgba_pre + template + struct comp_op_adaptor_rgba_pre + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + comp_op_table_rgba::g_comp_op_func[op](p, r, g, b, a, cover); + } + }; + + //=====================================comp_op_adaptor_clip_to_dst_rgba_pre + template + struct comp_op_adaptor_clip_to_dst_rgba_pre + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + value_type da = p[Order::A]; + comp_op_table_rgba::g_comp_op_func[op](p, + color_type::multiply(r, da), + color_type::multiply(g, da), + color_type::multiply(b, da), + color_type::multiply(a, da), cover); + } + }; + + //====================================================comp_op_adaptor_rgba_plain + template + struct comp_op_adaptor_rgba_plain + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + multiplier_rgba::premultiply(p); + comp_op_adaptor_rgba::blend_pix(op, p, r, g, b, a, cover); + multiplier_rgba::demultiply(p); + } + }; + + //=========================================comp_op_adaptor_clip_to_dst_rgba_plain + template + struct comp_op_adaptor_clip_to_dst_rgba_plain + { + typedef ColorT color_type; + typedef Order order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + multiplier_rgba::premultiply(p); + comp_op_adaptor_clip_to_dst_rgba::blend_pix(op, p, r, g, b, a, cover); + multiplier_rgba::demultiply(p); + } + }; + + //=======================================================comp_adaptor_rgba + template + struct comp_adaptor_rgba + { + typedef typename BlenderPre::color_type color_type; + typedef typename BlenderPre::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + BlenderPre::blend_pix(p, + color_type::multiply(r, a), + color_type::multiply(g, a), + color_type::multiply(b, a), + a, cover); + } + }; + + //==========================================comp_adaptor_clip_to_dst_rgba + template + struct comp_adaptor_clip_to_dst_rgba + { + typedef typename BlenderPre::color_type color_type; + typedef typename BlenderPre::order_type order_type; + typedef typename 
color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + r = color_type::multiply(r, a); + g = color_type::multiply(g, a); + b = color_type::multiply(b, a); + value_type da = p[order_type::A]; + BlenderPre::blend_pix(p, + color_type::multiply(r, da), + color_type::multiply(g, da), + color_type::multiply(b, da), + color_type::multiply(a, da), cover); + } + }; + + //=======================================================comp_adaptor_rgba_pre + template + struct comp_adaptor_rgba_pre + { + typedef typename BlenderPre::color_type color_type; + typedef typename BlenderPre::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + BlenderPre::blend_pix(p, r, g, b, a, cover); + } + }; + + //======================================comp_adaptor_clip_to_dst_rgba_pre + template + struct comp_adaptor_clip_to_dst_rgba_pre + { + typedef typename BlenderPre::color_type color_type; + typedef typename BlenderPre::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + unsigned da = p[order_type::A]; + BlenderPre::blend_pix(p, + color_type::multiply(r, da), + color_type::multiply(g, da), + color_type::multiply(b, da), + color_type::multiply(a, da), + cover); + } + }; + + //=======================================================comp_adaptor_rgba_plain + template + struct comp_adaptor_rgba_plain + { + typedef typename BlenderPre::color_type color_type; + typedef typename BlenderPre::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + multiplier_rgba::premultiply(p); + comp_adaptor_rgba::blend_pix(op, p, r, g, b, a, cover); + multiplier_rgba::demultiply(p); + } + }; + + //==========================================comp_adaptor_clip_to_dst_rgba_plain + template + struct comp_adaptor_clip_to_dst_rgba_plain + { + typedef typename BlenderPre::color_type color_type; + typedef typename BlenderPre::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + typedef typename color_type::long_type long_type; + + static AGG_INLINE void blend_pix(unsigned op, value_type* p, + value_type r, value_type g, value_type b, value_type a, cover_type cover) + { + multiplier_rgba::premultiply(p); + comp_adaptor_clip_to_dst_rgba::blend_pix(op, p, r, g, b, a, cover); + multiplier_rgba::demultiply(p); + } + }; + + + //=================================================pixfmt_alpha_blend_rgba + template + class pixfmt_alpha_blend_rgba + { + public: + typedef pixfmt_rgba_tag pixfmt_category; + typedef RenBuf rbuf_type; + typedef typename 
rbuf_type::row_data row_data; + typedef Blender blender_type; + typedef typename blender_type::color_type color_type; + typedef typename blender_type::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + enum + { + num_components = 4, + pix_step = 4, + pix_width = sizeof(value_type) * pix_step, + }; + struct pixel_type + { + value_type c[num_components]; + + void set(value_type r, value_type g, value_type b, value_type a) + { + c[order_type::R] = r; + c[order_type::G] = g; + c[order_type::B] = b; + c[order_type::A] = a; + } + + void set(const color_type& color) + { + set(color.r, color.g, color.b, color.a); + } + + void get(value_type& r, value_type& g, value_type& b, value_type& a) const + { + r = c[order_type::R]; + g = c[order_type::G]; + b = c[order_type::B]; + a = c[order_type::A]; + } + + color_type get() const + { + return color_type( + c[order_type::R], + c[order_type::G], + c[order_type::B], + c[order_type::A]); + } + + pixel_type* next() + { + return (pixel_type*)(c + pix_step); + } + + const pixel_type* next() const + { + return (const pixel_type*)(c + pix_step); + } + + pixel_type* advance(int n) + { + return (pixel_type*)(c + n * pix_step); + } + + const pixel_type* advance(int n) const + { + return (const pixel_type*)(c + n * pix_step); + } + }; + + private: + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover) + { + m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(pixel_type* p, const color_type& c) + { + m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover) + { + if (!c.is_transparent()) + { + if (c.is_opaque() && cover == cover_mask) + { + p->set(c.r, c.g, c.b, c.a); + } + else + { + m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover); + } + } + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c) + { + if (!c.is_transparent()) + { + if (c.is_opaque()) + { + p->set(c.r, c.g, c.b, c.a); + } + else + { + m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a); + } + } + } + + public: + //-------------------------------------------------------------------- + pixfmt_alpha_blend_rgba() : m_rbuf(0) {} + explicit pixfmt_alpha_blend_rgba(rbuf_type& rb) : m_rbuf(&rb) {} + void attach(rbuf_type& rb) { m_rbuf = &rb; } + + //-------------------------------------------------------------------- + template + bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2) + { + rect_i r(x1, y1, x2, y2); + if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1))) + { + int stride = pixf.stride(); + m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? 
r.y2 : r.y1), + (r.x2 - r.x1) + 1, + (r.y2 - r.y1) + 1, + stride); + return true; + } + return false; + } + + //-------------------------------------------------------------------- + AGG_INLINE unsigned width() const { return m_rbuf->width(); } + AGG_INLINE unsigned height() const { return m_rbuf->height(); } + AGG_INLINE int stride() const { return m_rbuf->stride(); } + + //-------------------------------------------------------------------- + AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); } + AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); } + AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); } + + //-------------------------------------------------------------------- + AGG_INLINE int8u* pix_ptr(int x, int y) + { + return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step); + } + + AGG_INLINE const int8u* pix_ptr(int x, int y) const + { + return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step); + } + + // Return pointer to pixel value, forcing row to be allocated. + AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len) + { + return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step)); + } + + // Return pointer to pixel value, or null if row not allocated. + AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const + { + int8u* p = m_rbuf->row_ptr(y); + return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step)) : 0; + } + + // Get pixel pointer from raw buffer pointer. + AGG_INLINE static pixel_type* pix_value_ptr(void* p) + { + return (pixel_type*)p; + } + + // Get pixel pointer from raw buffer pointer. + AGG_INLINE static const pixel_type* pix_value_ptr(const void* p) + { + return (const pixel_type*)p; + } + + //-------------------------------------------------------------------- + AGG_INLINE static void write_plain_color(void* p, color_type c) + { + blender_type::set_plain_color(pix_value_ptr(p)->c, c); + } + + //-------------------------------------------------------------------- + AGG_INLINE static color_type read_plain_color(const void* p) + { + return blender_type::get_plain_color(pix_value_ptr(p)->c); + } + + //-------------------------------------------------------------------- + AGG_INLINE static void make_pix(int8u* p, const color_type& c) + { + ((pixel_type*)p)->set(c); + } + + //-------------------------------------------------------------------- + AGG_INLINE color_type pixel(int x, int y) const + { + if (const pixel_type* p = pix_value_ptr(x, y)) + { + return p->get(); + } + return color_type::no_color(); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_pixel(int x, int y, const color_type& c) + { + pix_value_ptr(x, y, 1)->set(c); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover) + { + copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_hline(int x, int y, + unsigned len, + const color_type& c) + { + pixel_type v; + v.set(c); + pixel_type* p = pix_value_ptr(x, y, len); + do + { + *p = v; + p = p->next(); + } + while (--len); + } + + + //-------------------------------------------------------------------- + AGG_INLINE void copy_vline(int x, int y, + unsigned len, + const color_type& c) + { + pixel_type v; + v.set(c); + do + { + *pix_value_ptr(x, y++, 1) = v; + } + while (--len); + } + + 
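The span methods above and below all reduce to pointer arithmetic over an interleaved buffer: a pixel is pix_step (= 4) value_type entries, pix_value_ptr(x, y, len) fetches the row from the rendering buffer (allocating it for dynamic buffers) and offsets by x pixels, and pixel_type::next()/advance() step the pointer by whole pixels, which is what the hline/vline loops rely on. A standalone sketch of the same addressing for a top-down, tightly packed RGBA8 buffer (names are illustrative, not part of the patch):

    #include <cstdint>
    #include <cstddef>

    // Return a pointer to the first byte of pixel (x, y).
    // 'stride' is the row pitch in bytes; a negative stride (bottom-up
    // buffer) would need the row lookup that rendering_buffer provides.
    static inline std::uint8_t* rgba8_pix_ptr(std::uint8_t* buf,
                                              std::ptrdiff_t stride,
                                              int x, int y)
    {
        const int pix_step = 4;    // r, g, b, a
        return buf + y * stride + x * pix_step;
    }

    // Stepping to the next pixel in a span is then just ptr += pix_step,
    // the plain-C equivalent of pixel_type::next().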
//-------------------------------------------------------------------- + void blend_hline(int x, int y, + unsigned len, + const color_type& c, + int8u cover) + { + if (!c.is_transparent()) + { + pixel_type* p = pix_value_ptr(x, y, len); + if (c.is_opaque() && cover == cover_mask) + { + pixel_type v; + v.set(c); + do + { + *p = v; + p = p->next(); + } + while (--len); + } + else + { + if (cover == cover_mask) + { + do + { + blend_pix(p, c); + p = p->next(); + } + while (--len); + } + else + { + do + { + blend_pix(p, c, cover); + p = p->next(); + } + while (--len); + } + } + } + } + + + //-------------------------------------------------------------------- + void blend_vline(int x, int y, + unsigned len, + const color_type& c, + int8u cover) + { + if (!c.is_transparent()) + { + if (c.is_opaque() && cover == cover_mask) + { + pixel_type v; + v.set(c); + do + { + *pix_value_ptr(x, y++, 1) = v; + } + while (--len); + } + else + { + if (cover == cover_mask) + { + do + { + blend_pix(pix_value_ptr(x, y++, 1), c, c.a); + } + while (--len); + } + else + { + do + { + blend_pix(pix_value_ptr(x, y++, 1), c, cover); + } + while (--len); + } + } + } + } + + + //-------------------------------------------------------------------- + void blend_solid_hspan(int x, int y, + unsigned len, + const color_type& c, + const int8u* covers) + { + if (!c.is_transparent()) + { + pixel_type* p = pix_value_ptr(x, y, len); + do + { + if (c.is_opaque() && *covers == cover_mask) + { + p->set(c); + } + else + { + blend_pix(p, c, *covers); + } + p = p->next(); + ++covers; + } + while (--len); + } + } + + + //-------------------------------------------------------------------- + void blend_solid_vspan(int x, int y, + unsigned len, + const color_type& c, + const int8u* covers) + { + if (!c.is_transparent()) + { + do + { + pixel_type* p = pix_value_ptr(x, y++, 1); + if (c.is_opaque() && *covers == cover_mask) + { + p->set(c); + } + else + { + blend_pix(p, c, *covers); + } + ++covers; + } + while (--len); + } + } + + //-------------------------------------------------------------------- + void copy_color_hspan(int x, int y, + unsigned len, + const color_type* colors) + { + pixel_type* p = pix_value_ptr(x, y, len); + do + { + p->set(*colors++); + p = p->next(); + } + while (--len); + } + + + //-------------------------------------------------------------------- + void copy_color_vspan(int x, int y, + unsigned len, + const color_type* colors) + { + do + { + pix_value_ptr(x, y++, 1)->set(*colors++); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_color_hspan(int x, int y, + unsigned len, + const color_type* colors, + const int8u* covers, + int8u cover) + { + pixel_type* p = pix_value_ptr(x, y, len); + if (covers) + { + do + { + copy_or_blend_pix(p, *colors++, *covers++); + p = p->next(); + } + while (--len); + } + else + { + if (cover == cover_mask) + { + do + { + copy_or_blend_pix(p, *colors++); + p = p->next(); + } + while (--len); + } + else + { + do + { + copy_or_blend_pix(p, *colors++, cover); + p = p->next(); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + void blend_color_vspan(int x, int y, + unsigned len, + const color_type* colors, + const int8u* covers, + int8u cover) + { + if (covers) + { + do + { + copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++); + } + while (--len); + } + else + { + if (cover == cover_mask) + { + do + { + copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++); 
+ } + while (--len); + } + else + { + do + { + copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + template void for_each_pixel(Function f) + { + for (unsigned y = 0; y < height(); ++y) + { + row_data r = m_rbuf->row(y); + if (r.ptr) + { + unsigned len = r.x2 - r.x1 + 1; + pixel_type* p = pix_value_ptr(r.x1, y, len); + do + { + f(p->c); + p = p->next(); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + void premultiply() + { + for_each_pixel(multiplier_rgba::premultiply); + } + + //-------------------------------------------------------------------- + void demultiply() + { + for_each_pixel(multiplier_rgba::demultiply); + } + + //-------------------------------------------------------------------- + template void apply_gamma_dir(const GammaLut& g) + { + for_each_pixel(apply_gamma_dir_rgba(g)); + } + + //-------------------------------------------------------------------- + template void apply_gamma_inv(const GammaLut& g) + { + for_each_pixel(apply_gamma_inv_rgba(g)); + } + + //-------------------------------------------------------------------- + template void copy_from(const RenBuf2& from, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len) + { + if (const int8u* p = from.row_ptr(ysrc)) + { + memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width, + p + xsrc * pix_width, + len * pix_width); + } + } + + //-------------------------------------------------------------------- + // Blend from another RGBA surface. + template + void blend_from(const SrcPixelFormatRenderer& from, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len, + int8u cover) + { + typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type; + + if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc)) + { + pixel_type* pdst = pix_value_ptr(xdst, ydst, len); + int srcinc = 1; + int dstinc = 1; + + if (xdst > xsrc) + { + psrc = psrc->advance(len - 1); + pdst = pdst->advance(len - 1); + srcinc = -1; + dstinc = -1; + } + + if (cover == cover_mask) + { + do + { + copy_or_blend_pix(pdst, psrc->get()); + psrc = psrc->advance(srcinc); + pdst = pdst->advance(dstinc); + } + while (--len); + } + else + { + do + { + copy_or_blend_pix(pdst, psrc->get(), cover); + psrc = psrc->advance(srcinc); + pdst = pdst->advance(dstinc); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + // Combine single color with grayscale surface and blend. + template + void blend_from_color(const SrcPixelFormatRenderer& from, + const color_type& color, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len, + int8u cover) + { + typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type; + typedef typename SrcPixelFormatRenderer::color_type src_color_type; + + if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc)) + { + pixel_type* pdst = pix_value_ptr(xdst, ydst, len); + + do + { + copy_or_blend_pix(pdst, color, + src_color_type::scale_cover(cover, psrc->c[0])); + psrc = psrc->next(); + pdst = pdst->next(); + } + while (--len); + } + } + + //-------------------------------------------------------------------- + // Blend from color table, using grayscale surface as indexes into table. + // Obviously, this only works for integer value types. 
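blend_from_lut (next) uses each pixel of a grayscale source as an index into a caller-supplied colour table, which is why its comment restricts it to integer value types: a floating-point channel cannot serve directly as an array subscript. A rough standalone sketch of the idea for one row of an 8-bit source, using the simplified straight-alpha blend from the earlier note (illustrative only, not the vendored code):

    #include <cstdint>

    // 'src' is one row of 8-bit grayscale indexes, 'dst' one row of RGBA8
    // pixels, 'lut' a 256-entry RGBA palette.
    static void blend_row_from_lut(std::uint8_t* dst, const std::uint8_t* src,
                                   const std::uint8_t (*lut)[4], unsigned len)
    {
        for (unsigned i = 0; i < len; ++i, dst += 4, ++src)
        {
            const std::uint8_t* c = lut[*src];
            const int a = c[3];
            for (int k = 0; k < 3; ++k)
                dst[k] = static_cast<std::uint8_t>(dst[k] + ((c[k] - dst[k]) * a) / 255);
            dst[3] = static_cast<std::uint8_t>(dst[3] + a - (dst[3] * a) / 255);
        }
    }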
+ template + void blend_from_lut(const SrcPixelFormatRenderer& from, + const color_type* color_lut, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len, + int8u cover) + { + typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type; + + if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc)) + { + pixel_type* pdst = pix_value_ptr(xdst, ydst, len); + + if (cover == cover_mask) + { + do + { + copy_or_blend_pix(pdst, color_lut[psrc->c[0]]); + psrc = psrc->next(); + pdst = pdst->next(); + } + while (--len); + } + else + { + do + { + copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover); + psrc = psrc->next(); + pdst = pdst->next(); + } + while (--len); + } + } + } + + private: + rbuf_type* m_rbuf; + Blender m_blender; + }; + + //================================================pixfmt_custom_blend_rgba + template class pixfmt_custom_blend_rgba + { + public: + typedef pixfmt_rgba_tag pixfmt_category; + typedef RenBuf rbuf_type; + typedef typename rbuf_type::row_data row_data; + typedef Blender blender_type; + typedef typename blender_type::color_type color_type; + typedef typename blender_type::order_type order_type; + typedef typename color_type::value_type value_type; + typedef typename color_type::calc_type calc_type; + enum + { + num_components = 4, + pix_step = 4, + pix_width = sizeof(value_type) * pix_step, + }; + struct pixel_type + { + value_type c[num_components]; + + void set(value_type r, value_type g, value_type b, value_type a) + { + c[order_type::R] = r; + c[order_type::G] = g; + c[order_type::B] = b; + c[order_type::A] = a; + } + + void set(const color_type& color) + { + set(color.r, color.g, color.b, color.a); + } + + void get(value_type& r, value_type& g, value_type& b, value_type& a) const + { + r = c[order_type::R]; + g = c[order_type::G]; + b = c[order_type::B]; + a = c[order_type::A]; + } + + color_type get() const + { + return color_type( + c[order_type::R], + c[order_type::G], + c[order_type::B], + c[order_type::A]); + } + + pixel_type* next() + { + return (pixel_type*)(c + pix_step); + } + + const pixel_type* next() const + { + return (const pixel_type*)(c + pix_step); + } + + pixel_type* advance(int n) + { + return (pixel_type*)(c + n * pix_step); + } + + const pixel_type* advance(int n) const + { + return (const pixel_type*)(c + n * pix_step); + } + }; + + + private: + //-------------------------------------------------------------------- + AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover = cover_full) + { + m_blender.blend_pix(m_comp_op, p->c, c.r, c.g, c.b, c.a, cover); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover = cover_full) + { + if (!c.is_transparent()) + { + if (c.is_opaque() && cover == cover_mask) + { + p->set(c.r, c.g, c.b, c.a); + } + else + { + blend_pix(p, c, cover); + } + } + } + + public: + //-------------------------------------------------------------------- + pixfmt_custom_blend_rgba() : m_rbuf(0), m_comp_op(3) {} + explicit pixfmt_custom_blend_rgba(rbuf_type& rb, unsigned comp_op=3) : + m_rbuf(&rb), + m_comp_op(comp_op) + {} + void attach(rbuf_type& rb) { m_rbuf = &rb; } + + //-------------------------------------------------------------------- + template + bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2) + { + rect_i r(x1, y1, x2, y2); + if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1))) + { + int stride = pixf.stride(); + m_rbuf->attach(pixf.pix_ptr(r.x1, stride 
< 0 ? r.y2 : r.y1), + (r.x2 - r.x1) + 1, + (r.y2 - r.y1) + 1, + stride); + return true; + } + return false; + } + + //-------------------------------------------------------------------- + void comp_op(unsigned op) { m_comp_op = op; } + unsigned comp_op() const { return m_comp_op; } + + //-------------------------------------------------------------------- + AGG_INLINE unsigned width() const { return m_rbuf->width(); } + AGG_INLINE unsigned height() const { return m_rbuf->height(); } + AGG_INLINE int stride() const { return m_rbuf->stride(); } + + //-------------------------------------------------------------------- + AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); } + AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); } + AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); } + + //-------------------------------------------------------------------- + AGG_INLINE int8u* pix_ptr(int x, int y) + { + return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step); + } + + AGG_INLINE const int8u* pix_ptr(int x, int y) const + { + return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step); + } + + // Return pointer to pixel value, forcing row to be allocated. + AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len) + { + return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step)); + } + + // Return pointer to pixel value, or null if row not allocated. + AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const + { + int8u* p = m_rbuf->row_ptr(y); + return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step)) : 0; + } + + // Get pixel pointer from raw buffer pointer. + AGG_INLINE static pixel_type* pix_value_ptr(void* p) + { + return (pixel_type*)p; + } + + // Get pixel pointer from raw buffer pointer. 
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p) + { + return (const pixel_type*)p; + } + + //-------------------------------------------------------------------- + AGG_INLINE static void make_pix(int8u* p, const color_type& c) + { + ((pixel_type*)p)->set(c); + } + + //-------------------------------------------------------------------- + AGG_INLINE color_type pixel(int x, int y) const + { + if (const pixel_type* p = pix_value_ptr(x, y)) + { + return p->get(); + } + return color_type::no_color(); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_pixel(int x, int y, const color_type& c) + { + make_pix(pix_value_ptr(x, y, 1), c); + } + + //-------------------------------------------------------------------- + AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover) + { + blend_pix(pix_value_ptr(x, y, 1), c, cover); + } + + //-------------------------------------------------------------------- + AGG_INLINE void copy_hline(int x, int y, + unsigned len, + const color_type& c) + { + pixel_type v; + v.set(c); + pixel_type* p = pix_value_ptr(x, y, len); + do + { + *p = v; + p = p->next(); + } + while (--len); + } + + + //-------------------------------------------------------------------- + AGG_INLINE void copy_vline(int x, int y, + unsigned len, + const color_type& c) + { + pixel_type v; + v.set(c); + do + { + *pix_value_ptr(x, y++, 1) = v; + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_hline(int x, int y, unsigned len, + const color_type& c, int8u cover) + { + + pixel_type* p = pix_value_ptr(x, y, len); + do + { + blend_pix(p, c, cover); + p = p->next(); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_vline(int x, int y, unsigned len, + const color_type& c, int8u cover) + { + do + { + blend_pix(pix_value_ptr(x, y++, 1), c, cover); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_solid_hspan(int x, int y, unsigned len, + const color_type& c, const int8u* covers) + { + pixel_type* p = pix_value_ptr(x, y, len); + + do + { + blend_pix(p, c, *covers++); + p = p->next(); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_solid_vspan(int x, int y, unsigned len, + const color_type& c, const int8u* covers) + { + do + { + blend_pix(pix_value_ptr(x, y++, 1), c, *covers++); + } + while (--len); + } + + //-------------------------------------------------------------------- + void copy_color_hspan(int x, int y, + unsigned len, + const color_type* colors) + { + pixel_type* p = pix_value_ptr(x, y, len); + + do + { + p->set(*colors++); + p = p->next(); + } + while (--len); + } + + //-------------------------------------------------------------------- + void copy_color_vspan(int x, int y, + unsigned len, + const color_type* colors) + { + do + { + pix_value_ptr(x, y++, 1)->set(*colors++); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_color_hspan(int x, int y, unsigned len, + const color_type* colors, + const int8u* covers, + int8u cover) + { + pixel_type* p = pix_value_ptr(x, y, len); + + do + { + blend_pix(p, *colors++, covers ? 
*covers++ : cover); + p = p->next(); + } + while (--len); + } + + //-------------------------------------------------------------------- + void blend_color_vspan(int x, int y, unsigned len, + const color_type* colors, + const int8u* covers, + int8u cover) + { + do + { + blend_pix(pix_value_ptr(x, y++, 1), *colors++, covers ? *covers++ : cover); + } + while (--len); + + } + + //-------------------------------------------------------------------- + template void for_each_pixel(Function f) + { + unsigned y; + for (y = 0; y < height(); ++y) + { + row_data r = m_rbuf->row(y); + if (r.ptr) + { + unsigned len = r.x2 - r.x1 + 1; + pixel_type* p = pix_value_ptr(r.x1, y, len); + do + { + f(p->c); + p = p->next(); + } + while (--len); + } + } + } + + //-------------------------------------------------------------------- + void premultiply() + { + for_each_pixel(multiplier_rgba::premultiply); + } + + //-------------------------------------------------------------------- + void demultiply() + { + for_each_pixel(multiplier_rgba::demultiply); + } + + //-------------------------------------------------------------------- + template void apply_gamma_dir(const GammaLut& g) + { + for_each_pixel(apply_gamma_dir_rgba(g)); + } + + //-------------------------------------------------------------------- + template void apply_gamma_inv(const GammaLut& g) + { + for_each_pixel(apply_gamma_inv_rgba(g)); + } + + //-------------------------------------------------------------------- + template void copy_from(const RenBuf2& from, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len) + { + if (const int8u* p = from.row_ptr(ysrc)) + { + memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width, + p + xsrc * pix_width, + len * pix_width); + } + } + + //-------------------------------------------------------------------- + // Blend from another RGBA surface. + template + void blend_from(const SrcPixelFormatRenderer& from, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len, + int8u cover) + { + typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type; + + if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc)) + { + pixel_type* pdst = pix_value_ptr(xdst, ydst, len); + int srcinc = 1; + int dstinc = 1; + + if (xdst > xsrc) + { + psrc = psrc->advance(len - 1); + pdst = pdst->advance(len - 1); + srcinc = -1; + dstinc = -1; + } + + do + { + blend_pix(pdst, psrc->get(), cover); + psrc = psrc->advance(srcinc); + pdst = pdst->advance(dstinc); + } + while (--len); + } + } + + //-------------------------------------------------------------------- + // Blend from single color, using grayscale surface as alpha channel. + template + void blend_from_color(const SrcPixelFormatRenderer& from, + const color_type& color, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len, + int8u cover) + { + typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type; + typedef typename SrcPixelFormatRenderer::color_type src_color_type; + + if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc)) + { + pixel_type* pdst = pix_value_ptr(xdst, ydst, len); + + do + { + blend_pix(pdst, color, + src_color_type::scale_cover(cover, psrc->c[0])); + psrc = psrc->next(); + pdst = pdst->next(); + } + while (--len); + } + } + + //-------------------------------------------------------------------- + // Blend from color table, using grayscale surface as indexes into table. + // Obviously, this only works for integer value types. 
+ template + void blend_from_lut(const SrcPixelFormatRenderer& from, + const color_type* color_lut, + int xdst, int ydst, + int xsrc, int ysrc, + unsigned len, + int8u cover) + { + typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type; + + if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc)) + { + pixel_type* pdst = pix_value_ptr(xdst, ydst, len); + + do + { + blend_pix(pdst, color_lut[psrc->c[0]], cover); + psrc = psrc->next(); + pdst = pdst->next(); + } + while (--len); + } + } + + private: + rbuf_type* m_rbuf; + Blender m_blender; + unsigned m_comp_op; + }; + + + //----------------------------------------------------------------------- + typedef blender_rgba blender_rgba32; + typedef blender_rgba blender_argb32; + typedef blender_rgba blender_abgr32; + typedef blender_rgba blender_bgra32; + + typedef blender_rgba blender_srgba32; + typedef blender_rgba blender_sargb32; + typedef blender_rgba blender_sabgr32; + typedef blender_rgba blender_sbgra32; + + typedef blender_rgba_pre blender_rgba32_pre; + typedef blender_rgba_pre blender_argb32_pre; + typedef blender_rgba_pre blender_abgr32_pre; + typedef blender_rgba_pre blender_bgra32_pre; + + typedef blender_rgba_pre blender_srgba32_pre; + typedef blender_rgba_pre blender_sargb32_pre; + typedef blender_rgba_pre blender_sabgr32_pre; + typedef blender_rgba_pre blender_sbgra32_pre; + + typedef blender_rgba_plain blender_rgba32_plain; + typedef blender_rgba_plain blender_argb32_plain; + typedef blender_rgba_plain blender_abgr32_plain; + typedef blender_rgba_plain blender_bgra32_plain; + + typedef blender_rgba_plain blender_srgba32_plain; + typedef blender_rgba_plain blender_sargb32_plain; + typedef blender_rgba_plain blender_sabgr32_plain; + typedef blender_rgba_plain blender_sbgra32_plain; + + typedef blender_rgba blender_rgba64; + typedef blender_rgba blender_argb64; + typedef blender_rgba blender_abgr64; + typedef blender_rgba blender_bgra64; + + typedef blender_rgba_pre blender_rgba64_pre; + typedef blender_rgba_pre blender_argb64_pre; + typedef blender_rgba_pre blender_abgr64_pre; + typedef blender_rgba_pre blender_bgra64_pre; + + typedef blender_rgba_plain blender_rgba64_plain; + typedef blender_rgba_plain blender_argb64_plain; + typedef blender_rgba_plain blender_abgr64_plain; + typedef blender_rgba_plain blender_bgra64_plain; + + typedef blender_rgba blender_rgba128; + typedef blender_rgba blender_argb128; + typedef blender_rgba blender_abgr128; + typedef blender_rgba blender_bgra128; + + typedef blender_rgba_pre blender_rgba128_pre; + typedef blender_rgba_pre blender_argb128_pre; + typedef blender_rgba_pre blender_abgr128_pre; + typedef blender_rgba_pre blender_bgra128_pre; + + typedef blender_rgba_plain blender_rgba128_plain; + typedef blender_rgba_plain blender_argb128_plain; + typedef blender_rgba_plain blender_abgr128_plain; + typedef blender_rgba_plain blender_bgra128_plain; + + + //----------------------------------------------------------------------- + typedef pixfmt_alpha_blend_rgba pixfmt_rgba32; + typedef pixfmt_alpha_blend_rgba pixfmt_argb32; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr32; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra32; + + typedef pixfmt_alpha_blend_rgba pixfmt_srgba32; + typedef pixfmt_alpha_blend_rgba pixfmt_sargb32; + typedef pixfmt_alpha_blend_rgba pixfmt_sabgr32; + typedef pixfmt_alpha_blend_rgba pixfmt_sbgra32; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba32_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_argb32_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr32_pre; + 
typedef pixfmt_alpha_blend_rgba pixfmt_bgra32_pre; + + typedef pixfmt_alpha_blend_rgba pixfmt_srgba32_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_sargb32_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_sabgr32_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_sbgra32_pre; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba32_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_argb32_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr32_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra32_plain; + + typedef pixfmt_alpha_blend_rgba pixfmt_srgba32_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_sargb32_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_sabgr32_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_sbgra32_plain; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba64; + typedef pixfmt_alpha_blend_rgba pixfmt_argb64; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr64; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra64; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba64_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_argb64_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr64_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra64_pre; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba64_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_argb64_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr64_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra64_plain; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba128; + typedef pixfmt_alpha_blend_rgba pixfmt_argb128; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr128; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra128; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba128_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_argb128_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr128_pre; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra128_pre; + + typedef pixfmt_alpha_blend_rgba pixfmt_rgba128_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_argb128_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_abgr128_plain; + typedef pixfmt_alpha_blend_rgba pixfmt_bgra128_plain; + +} + +#endif + diff --git a/kiva/markers/agg/agg_renderer_base.h b/kiva/markers/agg/agg_renderer_base.h new file mode 100644 index 000000000..545cba713 --- /dev/null +++ b/kiva/markers/agg/agg_renderer_base.h @@ -0,0 +1,731 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. 
+// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// class renderer_base +// +//---------------------------------------------------------------------------- + +#ifndef AGG_RENDERER_BASE_INCLUDED +#define AGG_RENDERER_BASE_INCLUDED + +#include "agg_basics.h" +#include "agg_rendering_buffer.h" + +namespace agg24markers +{ + + //-----------------------------------------------------------renderer_base + template class renderer_base + { + public: + typedef PixelFormat pixfmt_type; + typedef typename pixfmt_type::color_type color_type; + typedef typename pixfmt_type::row_data row_data; + + //-------------------------------------------------------------------- + renderer_base() : m_ren(0), m_clip_box(1, 1, 0, 0) {} + explicit renderer_base(pixfmt_type& ren) : + m_ren(&ren), + m_clip_box(0, 0, ren.width() - 1, ren.height() - 1) + {} + void attach(pixfmt_type& ren) + { + m_ren = &ren; + m_clip_box = rect_i(0, 0, ren.width() - 1, ren.height() - 1); + } + + //-------------------------------------------------------------------- + const pixfmt_type& ren() const { return *m_ren; } + pixfmt_type& ren() { return *m_ren; } + + //-------------------------------------------------------------------- + unsigned width() const { return m_ren->width(); } + unsigned height() const { return m_ren->height(); } + + //-------------------------------------------------------------------- + bool clip_box(int x1, int y1, int x2, int y2) + { + rect_i cb(x1, y1, x2, y2); + cb.normalize(); + if(cb.clip(rect_i(0, 0, width() - 1, height() - 1))) + { + m_clip_box = cb; + return true; + } + m_clip_box.x1 = 1; + m_clip_box.y1 = 1; + m_clip_box.x2 = 0; + m_clip_box.y2 = 0; + return false; + } + + //-------------------------------------------------------------------- + void reset_clipping(bool visibility) + { + if(visibility) + { + m_clip_box.x1 = 0; + m_clip_box.y1 = 0; + m_clip_box.x2 = width() - 1; + m_clip_box.y2 = height() - 1; + } + else + { + m_clip_box.x1 = 1; + m_clip_box.y1 = 1; + m_clip_box.x2 = 0; + m_clip_box.y2 = 0; + } + } + + //-------------------------------------------------------------------- + void clip_box_naked(int x1, int y1, int x2, int y2) + { + m_clip_box.x1 = x1; + m_clip_box.y1 = y1; + m_clip_box.x2 = x2; + m_clip_box.y2 = y2; + } + + //-------------------------------------------------------------------- + bool inbox(int x, int y) const + { + return x >= m_clip_box.x1 && y >= m_clip_box.y1 && + x <= m_clip_box.x2 && y <= m_clip_box.y2; + } + + //-------------------------------------------------------------------- + const rect_i& clip_box() const { return m_clip_box; } + int xmin() const { return m_clip_box.x1; } + int ymin() const { return m_clip_box.y1; } + int xmax() const { return m_clip_box.x2; } + int ymax() const { return m_clip_box.y2; } + + //-------------------------------------------------------------------- + const rect_i& bounding_clip_box() const { return m_clip_box; } + int bounding_xmin() const { return m_clip_box.x1; } + int bounding_ymin() const { return m_clip_box.y1; } + int bounding_xmax() const { return m_clip_box.x2; } + int bounding_ymax() const { return m_clip_box.y2; } + + //-------------------------------------------------------------------- + void clear(const color_type& c) + { + unsigned y; + if(width()) + { + for(y = 0; y < height(); y++) + { + m_ren->copy_hline(0, y, 
width(), c); + } + } + } + + + //-------------------------------------------------------------------- + void fill(const color_type& c) + { + unsigned y; + if(width()) + { + for(y = 0; y < height(); y++) + { + m_ren->blend_hline(0, y, width(), c, cover_mask); + } + } + } + + //-------------------------------------------------------------------- + void copy_pixel(int x, int y, const color_type& c) + { + if(inbox(x, y)) + { + m_ren->copy_pixel(x, y, c); + } + } + + //-------------------------------------------------------------------- + void blend_pixel(int x, int y, const color_type& c, cover_type cover) + { + if(inbox(x, y)) + { + m_ren->blend_pixel(x, y, c, cover); + } + } + + //-------------------------------------------------------------------- + color_type pixel(int x, int y) const + { + return inbox(x, y) ? + m_ren->pixel(x, y) : + color_type::no_color(); + } + + //-------------------------------------------------------------------- + void copy_hline(int x1, int y, int x2, const color_type& c) + { + if(x1 > x2) { int t = x2; x2 = x1; x1 = t; } + if(y > ymax()) return; + if(y < ymin()) return; + if(x1 > xmax()) return; + if(x2 < xmin()) return; + + if(x1 < xmin()) x1 = xmin(); + if(x2 > xmax()) x2 = xmax(); + + m_ren->copy_hline(x1, y, x2 - x1 + 1, c); + } + + //-------------------------------------------------------------------- + void copy_vline(int x, int y1, int y2, const color_type& c) + { + if(y1 > y2) { int t = y2; y2 = y1; y1 = t; } + if(x > xmax()) return; + if(x < xmin()) return; + if(y1 > ymax()) return; + if(y2 < ymin()) return; + + if(y1 < ymin()) y1 = ymin(); + if(y2 > ymax()) y2 = ymax(); + + m_ren->copy_vline(x, y1, y2 - y1 + 1, c); + } + + //-------------------------------------------------------------------- + void blend_hline(int x1, int y, int x2, + const color_type& c, cover_type cover) + { + if(x1 > x2) { int t = x2; x2 = x1; x1 = t; } + if(y > ymax()) return; + if(y < ymin()) return; + if(x1 > xmax()) return; + if(x2 < xmin()) return; + + if(x1 < xmin()) x1 = xmin(); + if(x2 > xmax()) x2 = xmax(); + + m_ren->blend_hline(x1, y, x2 - x1 + 1, c, cover); + } + + //-------------------------------------------------------------------- + void blend_vline(int x, int y1, int y2, + const color_type& c, cover_type cover) + { + if(y1 > y2) { int t = y2; y2 = y1; y1 = t; } + if(x > xmax()) return; + if(x < xmin()) return; + if(y1 > ymax()) return; + if(y2 < ymin()) return; + + if(y1 < ymin()) y1 = ymin(); + if(y2 > ymax()) y2 = ymax(); + + m_ren->blend_vline(x, y1, y2 - y1 + 1, c, cover); + } + + + //-------------------------------------------------------------------- + void copy_bar(int x1, int y1, int x2, int y2, const color_type& c) + { + rect_i rc(x1, y1, x2, y2); + rc.normalize(); + if(rc.clip(clip_box())) + { + int y; + for(y = rc.y1; y <= rc.y2; y++) + { + m_ren->copy_hline(rc.x1, y, unsigned(rc.x2 - rc.x1 + 1), c); + } + } + } + + //-------------------------------------------------------------------- + void blend_bar(int x1, int y1, int x2, int y2, + const color_type& c, cover_type cover) + { + rect_i rc(x1, y1, x2, y2); + rc.normalize(); + if(rc.clip(clip_box())) + { + int y; + for(y = rc.y1; y <= rc.y2; y++) + { + m_ren->blend_hline(rc.x1, + y, + unsigned(rc.x2 - rc.x1 + 1), + c, + cover); + } + } + } + + //-------------------------------------------------------------------- + void blend_solid_hspan(int x, int y, int len, + const color_type& c, + const cover_type* covers) + { + if(y > ymax()) return; + if(y < ymin()) return; + + if(x < xmin()) + { + len -= xmin() - x; 
+ if(len <= 0) return; + covers += xmin() - x; + x = xmin(); + } + if(x + len > xmax()) + { + len = xmax() - x + 1; + if(len <= 0) return; + } + m_ren->blend_solid_hspan(x, y, len, c, covers); + } + + //-------------------------------------------------------------------- + void blend_solid_vspan(int x, int y, int len, + const color_type& c, + const cover_type* covers) + { + if(x > xmax()) return; + if(x < xmin()) return; + + if(y < ymin()) + { + len -= ymin() - y; + if(len <= 0) return; + covers += ymin() - y; + y = ymin(); + } + if(y + len > ymax()) + { + len = ymax() - y + 1; + if(len <= 0) return; + } + m_ren->blend_solid_vspan(x, y, len, c, covers); + } + + + //-------------------------------------------------------------------- + void copy_color_hspan(int x, int y, int len, const color_type* colors) + { + if(y > ymax()) return; + if(y < ymin()) return; + + if(x < xmin()) + { + int d = xmin() - x; + len -= d; + if(len <= 0) return; + colors += d; + x = xmin(); + } + if(x + len > xmax()) + { + len = xmax() - x + 1; + if(len <= 0) return; + } + m_ren->copy_color_hspan(x, y, len, colors); + } + + + //-------------------------------------------------------------------- + void copy_color_vspan(int x, int y, int len, const color_type* colors) + { + if(x > xmax()) return; + if(x < xmin()) return; + + if(y < ymin()) + { + int d = ymin() - y; + len -= d; + if(len <= 0) return; + colors += d; + y = ymin(); + } + if(y + len > ymax()) + { + len = ymax() - y + 1; + if(len <= 0) return; + } + m_ren->copy_color_vspan(x, y, len, colors); + } + + + //-------------------------------------------------------------------- + void blend_color_hspan(int x, int y, int len, + const color_type* colors, + const cover_type* covers, + cover_type cover = agg24markers::cover_full) + { + if(y > ymax()) return; + if(y < ymin()) return; + + if(x < xmin()) + { + int d = xmin() - x; + len -= d; + if(len <= 0) return; + if(covers) covers += d; + colors += d; + x = xmin(); + } + if(x + len > xmax()) + { + len = xmax() - x + 1; + if(len <= 0) return; + } + m_ren->blend_color_hspan(x, y, len, colors, covers, cover); + } + + //-------------------------------------------------------------------- + void blend_color_vspan(int x, int y, int len, + const color_type* colors, + const cover_type* covers, + cover_type cover = agg24markers::cover_full) + { + if(x > xmax()) return; + if(x < xmin()) return; + + if(y < ymin()) + { + int d = ymin() - y; + len -= d; + if(len <= 0) return; + if(covers) covers += d; + colors += d; + y = ymin(); + } + if(y + len > ymax()) + { + len = ymax() - y + 1; + if(len <= 0) return; + } + m_ren->blend_color_vspan(x, y, len, colors, covers, cover); + } + + //-------------------------------------------------------------------- + rect_i clip_rect_area(rect_i& dst, rect_i& src, int wsrc, int hsrc) const + { + rect_i rc(0,0,0,0); + rect_i cb = clip_box(); + ++cb.x2; + ++cb.y2; + + if(src.x1 < 0) + { + dst.x1 -= src.x1; + src.x1 = 0; + } + if(src.y1 < 0) + { + dst.y1 -= src.y1; + src.y1 = 0; + } + + if(src.x2 > wsrc) src.x2 = wsrc; + if(src.y2 > hsrc) src.y2 = hsrc; + + if(dst.x1 < cb.x1) + { + src.x1 += cb.x1 - dst.x1; + dst.x1 = cb.x1; + } + if(dst.y1 < cb.y1) + { + src.y1 += cb.y1 - dst.y1; + dst.y1 = cb.y1; + } + + if(dst.x2 > cb.x2) dst.x2 = cb.x2; + if(dst.y2 > cb.y2) dst.y2 = cb.y2; + + rc.x2 = dst.x2 - dst.x1; + rc.y2 = dst.y2 - dst.y1; + + if(rc.x2 > src.x2 - src.x1) rc.x2 = src.x2 - src.x1; + if(rc.y2 > src.y2 - src.y1) rc.y2 = src.y2 - src.y1; + return rc; + } + + 
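
As an illustrative sketch only (not part of the patch itself), the clipping contract of renderer_base can be exercised as below. It assumes the standard AGG typedefs that the vendored headers are expected to provide under the agg24markers namespace (rendering_buffer, pixfmt_rgba32, rgba8, cover_full); the buffer size and clip rectangle are arbitrary.

    // Minimal sketch: spans blended through renderer_base are clipped to the
    // current clip box; pixels outside it are left untouched.
    #include <vector>
    #include "agg_pixfmt_rgba.h"
    #include "agg_renderer_base.h"

    void clip_demo()
    {
        using namespace agg24markers;

        const unsigned w = 64, h = 64;
        std::vector<int8u> buf(w * h * 4, 0);

        rendering_buffer rbuf(&buf[0], w, h, int(w * 4));   // stride in bytes
        pixfmt_rgba32 pixf(rbuf);
        renderer_base<pixfmt_rgba32> ren(pixf);

        ren.clear(rgba8(255, 255, 255, 255));   // fill the whole buffer
        ren.clip_box(10, 10, 20, 20);           // later blends are clipped

        // Only the x range [10, 20] of row 15 is modified; the rest of the
        // span is rejected by the xmin()/xmax() checks in blend_hline.
        ren.blend_hline(0, 15, 63, rgba8(255, 0, 0, 255), cover_full);
    }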
//-------------------------------------------------------------------- + template + void copy_from(const RenBuf& src, + const rect_i* rect_src_ptr = 0, + int dx = 0, + int dy = 0) + { + rect_i rsrc(0, 0, src.width(), src.height()); + if(rect_src_ptr) + { + rsrc.x1 = rect_src_ptr->x1; + rsrc.y1 = rect_src_ptr->y1; + rsrc.x2 = rect_src_ptr->x2 + 1; + rsrc.y2 = rect_src_ptr->y2 + 1; + } + + // Version with xdst, ydst (absolute positioning) + //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1); + + // Version with dx, dy (relative positioning) + rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy); + + rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height()); + + if(rc.x2 > 0) + { + int incy = 1; + if(rdst.y1 > rsrc.y1) + { + rsrc.y1 += rc.y2 - 1; + rdst.y1 += rc.y2 - 1; + incy = -1; + } + while(rc.y2 > 0) + { + m_ren->copy_from(src, + rdst.x1, rdst.y1, + rsrc.x1, rsrc.y1, + rc.x2); + rdst.y1 += incy; + rsrc.y1 += incy; + --rc.y2; + } + } + } + + //-------------------------------------------------------------------- + template + void blend_from(const SrcPixelFormatRenderer& src, + const rect_i* rect_src_ptr = 0, + int dx = 0, + int dy = 0, + cover_type cover = agg24markers::cover_full) + { + rect_i rsrc(0, 0, src.width(), src.height()); + if(rect_src_ptr) + { + rsrc.x1 = rect_src_ptr->x1; + rsrc.y1 = rect_src_ptr->y1; + rsrc.x2 = rect_src_ptr->x2 + 1; + rsrc.y2 = rect_src_ptr->y2 + 1; + } + + // Version with xdst, ydst (absolute positioning) + //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1); + + // Version with dx, dy (relative positioning) + rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy); + rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height()); + + if(rc.x2 > 0) + { + int incy = 1; + if(rdst.y1 > rsrc.y1) + { + rsrc.y1 += rc.y2 - 1; + rdst.y1 += rc.y2 - 1; + incy = -1; + } + while(rc.y2 > 0) + { + typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1); + if(rw.ptr) + { + int x1src = rsrc.x1; + int x1dst = rdst.x1; + int len = rc.x2; + if(rw.x1 > x1src) + { + x1dst += rw.x1 - x1src; + len -= rw.x1 - x1src; + x1src = rw.x1; + } + if(len > 0) + { + if(x1src + len-1 > rw.x2) + { + len -= x1src + len - rw.x2 - 1; + } + if(len > 0) + { + m_ren->blend_from(src, + x1dst, rdst.y1, + x1src, rsrc.y1, + len, + cover); + } + } + } + rdst.y1 += incy; + rsrc.y1 += incy; + --rc.y2; + } + } + } + + //-------------------------------------------------------------------- + template + void blend_from_color(const SrcPixelFormatRenderer& src, + const color_type& color, + const rect_i* rect_src_ptr = 0, + int dx = 0, + int dy = 0, + cover_type cover = agg24markers::cover_full) + { + rect_i rsrc(0, 0, src.width(), src.height()); + if(rect_src_ptr) + { + rsrc.x1 = rect_src_ptr->x1; + rsrc.y1 = rect_src_ptr->y1; + rsrc.x2 = rect_src_ptr->x2 + 1; + rsrc.y2 = rect_src_ptr->y2 + 1; + } + + // Version with xdst, ydst (absolute positioning) + //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1); + + // Version with dx, dy (relative positioning) + rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy); + rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height()); + + if(rc.x2 > 0) + { + int incy = 1; + if(rdst.y1 > rsrc.y1) + { + rsrc.y1 += rc.y2 - 1; + rdst.y1 += rc.y2 - 1; + incy = -1; + } + while(rc.y2 > 0) + { + typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1); + if(rw.ptr) + { + int x1src = rsrc.x1; + int x1dst = rdst.x1; + int len = rc.x2; 
+ if(rw.x1 > x1src) + { + x1dst += rw.x1 - x1src; + len -= rw.x1 - x1src; + x1src = rw.x1; + } + if(len > 0) + { + if(x1src + len-1 > rw.x2) + { + len -= x1src + len - rw.x2 - 1; + } + if(len > 0) + { + m_ren->blend_from_color(src, + color, + x1dst, rdst.y1, + x1src, rsrc.y1, + len, + cover); + } + } + } + rdst.y1 += incy; + rsrc.y1 += incy; + --rc.y2; + } + } + } + + //-------------------------------------------------------------------- + template + void blend_from_lut(const SrcPixelFormatRenderer& src, + const color_type* color_lut, + const rect_i* rect_src_ptr = 0, + int dx = 0, + int dy = 0, + cover_type cover = agg24markers::cover_full) + { + rect_i rsrc(0, 0, src.width(), src.height()); + if(rect_src_ptr) + { + rsrc.x1 = rect_src_ptr->x1; + rsrc.y1 = rect_src_ptr->y1; + rsrc.x2 = rect_src_ptr->x2 + 1; + rsrc.y2 = rect_src_ptr->y2 + 1; + } + + // Version with xdst, ydst (absolute positioning) + //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1); + + // Version with dx, dy (relative positioning) + rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy); + rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height()); + + if(rc.x2 > 0) + { + int incy = 1; + if(rdst.y1 > rsrc.y1) + { + rsrc.y1 += rc.y2 - 1; + rdst.y1 += rc.y2 - 1; + incy = -1; + } + while(rc.y2 > 0) + { + typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1); + if(rw.ptr) + { + int x1src = rsrc.x1; + int x1dst = rdst.x1; + int len = rc.x2; + if(rw.x1 > x1src) + { + x1dst += rw.x1 - x1src; + len -= rw.x1 - x1src; + x1src = rw.x1; + } + if(len > 0) + { + if(x1src + len-1 > rw.x2) + { + len -= x1src + len - rw.x2 - 1; + } + if(len > 0) + { + m_ren->blend_from_lut(src, + color_lut, + x1dst, rdst.y1, + x1src, rsrc.y1, + len, + cover); + } + } + } + rdst.y1 += incy; + rsrc.y1 += incy; + --rc.y2; + } + } + } + + private: + pixfmt_type* m_ren; + rect_i m_clip_box; + }; + + +} + +#endif diff --git a/kiva/markers/agg/agg_renderer_markers.h b/kiva/markers/agg/agg_renderer_markers.h new file mode 100644 index 000000000..0c014ed91 --- /dev/null +++ b/kiva/markers/agg/agg_renderer_markers.h @@ -0,0 +1,711 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. 
+// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// class renderer_markers +// +//---------------------------------------------------------------------------- + +#ifndef AGG_RENDERER_MARKERS_INCLUDED +#define AGG_RENDERER_MARKERS_INCLUDED + +#include "agg_basics.h" +#include "agg_renderer_primitives.h" + +namespace agg24markers +{ + + //---------------------------------------------------------------marker_e + enum marker_e + { + marker_square, + marker_diamond, + marker_circle, + marker_crossed_circle, + marker_semiellipse_left, + marker_semiellipse_right, + marker_semiellipse_up, + marker_semiellipse_down, + marker_triangle_left, + marker_triangle_right, + marker_triangle_up, + marker_triangle_down, + marker_four_rays, + marker_cross, + marker_x, + marker_dash, + marker_dot, + marker_pixel, + + end_of_markers + }; + + + + //--------------------------------------------------------renderer_markers + template class renderer_markers : + public renderer_primitives + { + public: + typedef renderer_primitives base_type; + typedef BaseRenderer base_ren_type; + typedef typename base_ren_type::color_type color_type; + + //-------------------------------------------------------------------- + renderer_markers(base_ren_type& rbuf) : + base_type(rbuf) + {} + + //-------------------------------------------------------------------- + bool visible(int x, int y, int r) const + { + rect_i rc(x-r, y-r, x+y, y+r); + return rc.clip(base_type::ren().bounding_clip_box()); + } + + //-------------------------------------------------------------------- + void square(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) base_type::outlined_rectangle(x-r, y-r, x+r, y+r); + else base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + + //-------------------------------------------------------------------- + void diamond(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int dy = -r; + int dx = 0; + do + { + base_type::ren().blend_pixel(x - dx, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dx, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dx, y - dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dx, y - dy, base_type::line_color(), cover_full); + + if(dx) + { + base_type::ren().blend_hline(x-dx+1, y+dy, x+dx-1, base_type::fill_color(), cover_full); + base_type::ren().blend_hline(x-dx+1, y-dy, x+dx-1, base_type::fill_color(), cover_full); + } + ++dy; + ++dx; + } + while(dy <= 0); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + //-------------------------------------------------------------------- + void circle(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) base_type::outlined_ellipse(x, y, r, r); + else base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + + + + //-------------------------------------------------------------------- + void crossed_circle(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + base_type::outlined_ellipse(x, y, r, r); + int r6 = r + (r >> 1); + if(r <= 2) r6++; + r >>= 1; + base_type::ren().blend_hline(x-r6, y, x-r, base_type::line_color(), cover_full); + base_type::ren().blend_hline(x+r, y, x+r6, base_type::line_color(), cover_full); 
+ base_type::ren().blend_vline(x, y-r6, y-r, base_type::line_color(), cover_full); + base_type::ren().blend_vline(x, y+r, y+r6, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //------------------------------------------------------------------------ + void semiellipse_left(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int r8 = r * 4 / 5; + int dy = -r; + int dx = 0; + ellipse_bresenham_interpolator ei(r * 3 / 5, r+r8); + do + { + dx += ei.dx(); + dy += ei.dy(); + + base_type::ren().blend_pixel(x + dy, y + dx, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dy, y - dx, base_type::line_color(), cover_full); + + if(ei.dy() && dx) + { + base_type::ren().blend_vline(x+dy, y-dx+1, y+dx-1, base_type::fill_color(), cover_full); + } + ++ei; + } + while(dy < r8); + base_type::ren().blend_vline(x+dy, y-dx, y+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void semiellipse_right(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int r8 = r * 4 / 5; + int dy = -r; + int dx = 0; + ellipse_bresenham_interpolator ei(r * 3 / 5, r+r8); + do + { + dx += ei.dx(); + dy += ei.dy(); + + base_type::ren().blend_pixel(x - dy, y + dx, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dy, y - dx, base_type::line_color(), cover_full); + + if(ei.dy() && dx) + { + base_type::ren().blend_vline(x-dy, y-dx+1, y+dx-1, base_type::fill_color(), cover_full); + } + ++ei; + } + while(dy < r8); + base_type::ren().blend_vline(x-dy, y-dx, y+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void semiellipse_up(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int r8 = r * 4 / 5; + int dy = -r; + int dx = 0; + ellipse_bresenham_interpolator ei(r * 3 / 5, r+r8); + do + { + dx += ei.dx(); + dy += ei.dy(); + + base_type::ren().blend_pixel(x + dx, y - dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dx, y - dy, base_type::line_color(), cover_full); + + if(ei.dy() && dx) + { + base_type::ren().blend_hline(x-dx+1, y-dy, x+dx-1, base_type::fill_color(), cover_full); + } + ++ei; + } + while(dy < r8); + base_type::ren().blend_hline(x-dx, y-dy-1, x+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void semiellipse_down(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int r8 = r * 4 / 5; + int dy = -r; + int dx = 0; + ellipse_bresenham_interpolator ei(r * 3 / 5, r+r8); + do + { + dx += ei.dx(); + dy += ei.dy(); + + base_type::ren().blend_pixel(x + dx, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dx, y + dy, base_type::line_color(), cover_full); + + if(ei.dy() && dx) + { + base_type::ren().blend_hline(x-dx+1, y+dy, x+dx-1, base_type::fill_color(), cover_full); + } + ++ei; + } + while(dy < r8); + base_type::ren().blend_hline(x-dx, y+dy+1, x+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), 
cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void triangle_left(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int dy = -r; + int dx = 0; + int flip = 0; + int r6 = r * 3 / 5; + do + { + base_type::ren().blend_pixel(x + dy, y - dx, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dy, y + dx, base_type::line_color(), cover_full); + + if(dx) + { + base_type::ren().blend_vline(x+dy, y-dx+1, y+dx-1, base_type::fill_color(), cover_full); + } + ++dy; + dx += flip; + flip ^= 1; + } + while(dy < r6); + base_type::ren().blend_vline(x+dy, y-dx, y+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void triangle_right(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int dy = -r; + int dx = 0; + int flip = 0; + int r6 = r * 3 / 5; + do + { + base_type::ren().blend_pixel(x - dy, y - dx, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dy, y + dx, base_type::line_color(), cover_full); + + if(dx) + { + base_type::ren().blend_vline(x-dy, y-dx+1, y+dx-1, base_type::fill_color(), cover_full); + } + ++dy; + dx += flip; + flip ^= 1; + } + while(dy < r6); + base_type::ren().blend_vline(x-dy, y-dx, y+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void triangle_up(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int dy = -r; + int dx = 0; + int flip = 0; + int r6 = r * 3 / 5; + do + { + base_type::ren().blend_pixel(x - dx, y - dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dx, y - dy, base_type::line_color(), cover_full); + + if(dx) + { + base_type::ren().blend_hline(x-dx+1, y-dy, x+dx-1, base_type::fill_color(), cover_full); + } + ++dy; + dx += flip; + flip ^= 1; + } + while(dy < r6); + base_type::ren().blend_hline(x-dx, y-dy, x+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void triangle_down(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int dy = -r; + int dx = 0; + int flip = 0; + int r6 = r * 3 / 5; + do + { + base_type::ren().blend_pixel(x - dx, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dx, y + dy, base_type::line_color(), cover_full); + + if(dx) + { + base_type::ren().blend_hline(x-dx+1, y+dy, x+dx-1, base_type::fill_color(), cover_full); + } + ++dy; + dx += flip; + flip ^= 1; + } + while(dy < r6); + base_type::ren().blend_hline(x-dx, y+dy, x+dx, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void four_rays(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int dy = -r; + int dx = 0; + int flip = 0; + int r3 = -(r / 3); + do + { + base_type::ren().blend_pixel(x - dx, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dx, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dx, y - dy, base_type::line_color(), 
cover_full); + base_type::ren().blend_pixel(x + dx, y - dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dy, y - dx, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dy, y + dx, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dy, y - dx, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dy, y + dx, base_type::line_color(), cover_full); + + if(dx) + { + base_type::ren().blend_hline(x-dx+1, y+dy, x+dx-1, base_type::fill_color(), cover_full); + base_type::ren().blend_hline(x-dx+1, y-dy, x+dx-1, base_type::fill_color(), cover_full); + base_type::ren().blend_vline(x+dy, y-dx+1, y+dx-1, base_type::fill_color(), cover_full); + base_type::ren().blend_vline(x-dy, y-dx+1, y+dx-1, base_type::fill_color(), cover_full); + } + ++dy; + dx += flip; + flip ^= 1; + } + while(dy <= r3); + base_type::solid_rectangle(x+r3+1, y+r3+1, x-r3-1, y-r3-1); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void cross(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + base_type::ren().blend_vline(x, y-r, y+r, base_type::line_color(), cover_full); + base_type::ren().blend_hline(x-r, y, x+r, base_type::line_color(), cover_full); + } + else + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + } + + + //-------------------------------------------------------------------- + void xing(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) + { + int dy = -r * 7 / 10; + do + { + base_type::ren().blend_pixel(x + dy, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dy, y + dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x + dy, y - dy, base_type::line_color(), cover_full); + base_type::ren().blend_pixel(x - dy, y - dy, base_type::line_color(), cover_full); + ++dy; + } + while(dy < 0); + } + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + + + //-------------------------------------------------------------------- + void dash(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) base_type::ren().blend_hline(x-r, y, x+r, base_type::line_color(), cover_full); + else base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + + + //-------------------------------------------------------------------- + void dot(int x, int y, int r) + { + if(visible(x, y, r)) + { + if(r) base_type::solid_ellipse(x, y, r, r); + else base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + } + + //-------------------------------------------------------------------- + void pixel(int x, int y, int) + { + base_type::ren().blend_pixel(x, y, base_type::fill_color(), cover_full); + } + + //-------------------------------------------------------------------- + void marker(int x, int y, int r, marker_e type) + { + switch(type) + { + case marker_square: square(x, y, r); break; + case marker_diamond: diamond(x, y, r); break; + case marker_circle: circle(x, y, r); break; + case marker_crossed_circle: crossed_circle(x, y, r); break; + case marker_semiellipse_left: semiellipse_left(x, y, r); break; + case marker_semiellipse_right: semiellipse_right(x, y, r); break; + case marker_semiellipse_up: semiellipse_up(x, y, r); break; + case marker_semiellipse_down: semiellipse_down(x, y, r); break; + case marker_triangle_left: triangle_left(x, y, r); 
break; + case marker_triangle_right: triangle_right(x, y, r); break; + case marker_triangle_up: triangle_up(x, y, r); break; + case marker_triangle_down: triangle_down(x, y, r); break; + case marker_four_rays: four_rays(x, y, r); break; + case marker_cross: cross(x, y, r); break; + case marker_x: xing(x, y, r); break; + case marker_dash: dash(x, y, r); break; + case marker_dot: dot(x, y, r); break; + case marker_pixel: pixel(x, y, r); break; + case end_of_markers: break; + } + } + + + //-------------------------------------------------------------------- + template + void markers(int n, const T* x, const T* y, T r, marker_e type) + { + if(n <= 0) return; + if(r == 0) + { + do + { + base_type::ren().blend_pixel(int(*x), int(*y), base_type::fill_color(), cover_full); + ++x; + ++y; + } + while(--n); + return; + } + + switch(type) + { + case marker_square: do { square (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_diamond: do { diamond (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_circle: do { circle (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_crossed_circle: do { crossed_circle (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_semiellipse_left: do { semiellipse_left (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_semiellipse_right: do { semiellipse_right(int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_semiellipse_up: do { semiellipse_up (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_semiellipse_down: do { semiellipse_down (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_triangle_left: do { triangle_left (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_triangle_right: do { triangle_right (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_triangle_up: do { triangle_up (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_triangle_down: do { triangle_down (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_four_rays: do { four_rays (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_cross: do { cross (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_x: do { xing (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_dash: do { dash (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_dot: do { dot (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case marker_pixel: do { pixel (int(*x), int(*y), int(r)); ++x; ++y; } while(--n); break; + case end_of_markers: break; + } + } + + //-------------------------------------------------------------------- + template + void markers(int n, const T* x, const T* y, const T* r, marker_e type) + { + if(n <= 0) return; + switch(type) + { + case marker_square: do { square (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_diamond: do { diamond (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_circle: do { circle (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_crossed_circle: do { crossed_circle (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_semiellipse_left: do { semiellipse_left (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_semiellipse_right: do { semiellipse_right(int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case 
marker_semiellipse_up: do { semiellipse_up (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_semiellipse_down: do { semiellipse_down (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_triangle_left: do { triangle_left (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_triangle_right: do { triangle_right (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_triangle_up: do { triangle_up (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_triangle_down: do { triangle_down (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_four_rays: do { four_rays (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_cross: do { cross (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_x: do { xing (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_dash: do { dash (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_dot: do { dot (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case marker_pixel: do { pixel (int(*x), int(*y), int(*r)); ++x; ++y; ++r; } while(--n); break; + case end_of_markers: break; + } + } + + //-------------------------------------------------------------------- + template + void markers(int n, const T* x, const T* y, const T* r, const color_type* fc, marker_e type) + { + if(n <= 0) return; + switch(type) + { + case marker_square: do { base_type::fill_color(*fc); square (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_diamond: do { base_type::fill_color(*fc); diamond (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_circle: do { base_type::fill_color(*fc); circle (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_crossed_circle: do { base_type::fill_color(*fc); crossed_circle (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_semiellipse_left: do { base_type::fill_color(*fc); semiellipse_left (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_semiellipse_right: do { base_type::fill_color(*fc); semiellipse_right(int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_semiellipse_up: do { base_type::fill_color(*fc); semiellipse_up (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_semiellipse_down: do { base_type::fill_color(*fc); semiellipse_down (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_triangle_left: do { base_type::fill_color(*fc); triangle_left (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_triangle_right: do { base_type::fill_color(*fc); triangle_right (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_triangle_up: do { base_type::fill_color(*fc); triangle_up (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_triangle_down: do { base_type::fill_color(*fc); triangle_down (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_four_rays: do { base_type::fill_color(*fc); four_rays (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_cross: do { base_type::fill_color(*fc); cross (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_x: do 
{ base_type::fill_color(*fc); xing (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_dash: do { base_type::fill_color(*fc); dash (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_dot: do { base_type::fill_color(*fc); dot (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case marker_pixel: do { base_type::fill_color(*fc); pixel (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; } while(--n); break; + case end_of_markers: break; + } + } + + //-------------------------------------------------------------------- + template + void markers(int n, const T* x, const T* y, const T* r, const color_type* fc, const color_type* lc, marker_e type) + { + if(n <= 0) return; + switch(type) + { + case marker_square: do { base_type::fill_color(*fc); base_type::line_color(*lc); square (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_diamond: do { base_type::fill_color(*fc); base_type::line_color(*lc); diamond (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_circle: do { base_type::fill_color(*fc); base_type::line_color(*lc); circle (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_crossed_circle: do { base_type::fill_color(*fc); base_type::line_color(*lc); crossed_circle (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_semiellipse_left: do { base_type::fill_color(*fc); base_type::line_color(*lc); semiellipse_left (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_semiellipse_right: do { base_type::fill_color(*fc); base_type::line_color(*lc); semiellipse_right(int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_semiellipse_up: do { base_type::fill_color(*fc); base_type::line_color(*lc); semiellipse_up (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_semiellipse_down: do { base_type::fill_color(*fc); base_type::line_color(*lc); semiellipse_down (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_triangle_left: do { base_type::fill_color(*fc); base_type::line_color(*lc); triangle_left (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_triangle_right: do { base_type::fill_color(*fc); base_type::line_color(*lc); triangle_right (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_triangle_up: do { base_type::fill_color(*fc); base_type::line_color(*lc); triangle_up (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_triangle_down: do { base_type::fill_color(*fc); base_type::line_color(*lc); triangle_down (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_four_rays: do { base_type::fill_color(*fc); base_type::line_color(*lc); four_rays (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_cross: do { base_type::fill_color(*fc); base_type::line_color(*lc); cross (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_x: do { base_type::fill_color(*fc); base_type::line_color(*lc); xing (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_dash: do { base_type::fill_color(*fc); base_type::line_color(*lc); dash (int(*x), int(*y), int(*r)); ++x; ++y; 
++r; ++fc; ++lc; } while(--n); break; + case marker_dot: do { base_type::fill_color(*fc); base_type::line_color(*lc); dot (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case marker_pixel: do { base_type::fill_color(*fc); base_type::line_color(*lc); pixel (int(*x), int(*y), int(*r)); ++x; ++y; ++r; ++fc; ++lc; } while(--n); break; + case end_of_markers: break; + } + } + }; + +} + +#endif diff --git a/kiva/markers/agg/agg_renderer_primitives.h b/kiva/markers/agg/agg_renderer_primitives.h new file mode 100644 index 000000000..4fd812962 --- /dev/null +++ b/kiva/markers/agg/agg_renderer_primitives.h @@ -0,0 +1,224 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// class renderer_primitives +// +//---------------------------------------------------------------------------- + +#ifndef AGG_RENDERER_PRIMITIVES_INCLUDED +#define AGG_RENDERER_PRIMITIVES_INCLUDED + +#include "agg_basics.h" +#include "agg_renderer_base.h" +#include "agg_dda_line.h" +#include "agg_ellipse_bresenham.h" + +namespace agg24markers +{ + //-----------------------------------------------------renderer_primitives + template class renderer_primitives + { + public: + typedef BaseRenderer base_ren_type; + typedef typename base_ren_type::color_type color_type; + + //-------------------------------------------------------------------- + explicit renderer_primitives(base_ren_type& ren) : + m_ren(&ren), + m_fill_color(), + m_line_color(), + m_curr_x(0), + m_curr_y(0) + {} + void attach(base_ren_type& ren) { m_ren = &ren; } + + //-------------------------------------------------------------------- + static int coord(double c) + { + return iround(c * line_bresenham_interpolator::subpixel_scale); + } + + //-------------------------------------------------------------------- + void fill_color(const color_type& c) { m_fill_color = c; } + void line_color(const color_type& c) { m_line_color = c; } + const color_type& fill_color() const { return m_fill_color; } + const color_type& line_color() const { return m_line_color; } + + //-------------------------------------------------------------------- + void rectangle(int x1, int y1, int x2, int y2) + { + m_ren->blend_hline(x1, y1, x2-1, m_line_color, cover_full); + m_ren->blend_vline(x2, y1, y2-1, m_line_color, cover_full); + m_ren->blend_hline(x1+1, y2, x2, m_line_color, cover_full); + m_ren->blend_vline(x1, y1+1, y2, m_line_color, cover_full); + } + + //-------------------------------------------------------------------- + void solid_rectangle(int x1, int y1, int x2, int y2) + { + m_ren->blend_bar(x1, y1, x2, y2, m_fill_color, cover_full); + } + + //-------------------------------------------------------------------- + void outlined_rectangle(int x1, int y1, int x2, int y2) + { + rectangle(x1, y1, x2, y2); + m_ren->blend_bar(x1+1, y1+1, x2-1, y2-1, m_fill_color, cover_full); + } + + 
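
For reference, a minimal sketch (again, not part of the patch) of how renderer_markers sits on top of renderer_base and renderer_primitives, which is roughly the stack the new kiva/markers/marker_renderer.h presumably wraps. The marker type, colors, coordinates, and buffer size are illustrative assumptions; the typedefs rendering_buffer, pixfmt_rgba32, and rgba8 are the standard AGG names expected from the vendored headers.

    // Minimal sketch: draw circle markers into an RGBA buffer, one at a time
    // and as a batch of points with a common radius.
    #include <vector>
    #include "agg_pixfmt_rgba.h"
    #include "agg_renderer_base.h"
    #include "agg_renderer_markers.h"

    void marker_demo()
    {
        using namespace agg24markers;

        const unsigned w = 100, h = 100;
        std::vector<int8u> buf(w * h * 4, 255);   // opaque white canvas

        rendering_buffer rbuf(&buf[0], w, h, int(w * 4));
        pixfmt_rgba32 pixf(rbuf);
        renderer_base<pixfmt_rgba32> base(pixf);
        renderer_markers<renderer_base<pixfmt_rgba32> > markers(base);

        markers.fill_color(rgba8(0, 0, 255, 255));   // blue fill
        markers.line_color(rgba8(0, 0, 0, 255));     // black outline

        // Single marker at (50, 50) with radius 8 ...
        markers.marker(50, 50, 8, marker_circle);

        // ... or a batch of points sharing one radius.
        int xs[] = {10, 30, 70};
        int ys[] = {20, 60, 80};
        markers.markers(3, xs, ys, 5, marker_circle);
    }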
//-------------------------------------------------------------------- + void ellipse(int x, int y, int rx, int ry) + { + ellipse_bresenham_interpolator ei(rx, ry); + int dx = 0; + int dy = -ry; + do + { + dx += ei.dx(); + dy += ei.dy(); + m_ren->blend_pixel(x + dx, y + dy, m_line_color, cover_full); + m_ren->blend_pixel(x + dx, y - dy, m_line_color, cover_full); + m_ren->blend_pixel(x - dx, y - dy, m_line_color, cover_full); + m_ren->blend_pixel(x - dx, y + dy, m_line_color, cover_full); + ++ei; + } + while(dy < 0); + } + + //-------------------------------------------------------------------- + void solid_ellipse(int x, int y, int rx, int ry) + { + ellipse_bresenham_interpolator ei(rx, ry); + int dx = 0; + int dy = -ry; + int dy0 = dy; + int dx0 = dx; + + do + { + dx += ei.dx(); + dy += ei.dy(); + + if(dy != dy0) + { + m_ren->blend_hline(x-dx0, y+dy0, x+dx0, m_fill_color, cover_full); + m_ren->blend_hline(x-dx0, y-dy0, x+dx0, m_fill_color, cover_full); + } + dx0 = dx; + dy0 = dy; + ++ei; + } + while(dy < 0); + m_ren->blend_hline(x-dx0, y+dy0, x+dx0, m_fill_color, cover_full); + } + + //-------------------------------------------------------------------- + void outlined_ellipse(int x, int y, int rx, int ry) + { + ellipse_bresenham_interpolator ei(rx, ry); + int dx = 0; + int dy = -ry; + + do + { + dx += ei.dx(); + dy += ei.dy(); + + m_ren->blend_pixel(x + dx, y + dy, m_line_color, cover_full); + m_ren->blend_pixel(x + dx, y - dy, m_line_color, cover_full); + m_ren->blend_pixel(x - dx, y - dy, m_line_color, cover_full); + m_ren->blend_pixel(x - dx, y + dy, m_line_color, cover_full); + + if(ei.dy() && dx) + { + m_ren->blend_hline(x-dx+1, y+dy, x+dx-1, m_fill_color, cover_full); + m_ren->blend_hline(x-dx+1, y-dy, x+dx-1, m_fill_color, cover_full); + } + ++ei; + } + while(dy < 0); + } + + //-------------------------------------------------------------------- + void line(int x1, int y1, int x2, int y2, bool last=false) + { + line_bresenham_interpolator li(x1, y1, x2, y2); + + unsigned len = li.len(); + if(len == 0) + { + if(last) + { + m_ren->blend_pixel(li.line_lr(x1), li.line_lr(y1), m_line_color, cover_full); + } + return; + } + + if(last) ++len; + + if(li.is_ver()) + { + do + { + m_ren->blend_pixel(li.x2(), li.y1(), m_line_color, cover_full); + li.vstep(); + } + while(--len); + } + else + { + do + { + m_ren->blend_pixel(li.x1(), li.y2(), m_line_color, cover_full); + li.hstep(); + } + while(--len); + } + } + + //-------------------------------------------------------------------- + void move_to(int x, int y) + { + m_curr_x = x; + m_curr_y = y; + } + + //-------------------------------------------------------------------- + void line_to(int x, int y, bool last=false) + { + line(m_curr_x, m_curr_y, x, y, last); + m_curr_x = x; + m_curr_y = y; + } + + //-------------------------------------------------------------------- + const base_ren_type& ren() const { return *m_ren; } + base_ren_type& ren() { return *m_ren; } + + //-------------------------------------------------------------------- + const rendering_buffer& rbuf() const { return m_ren->rbuf(); } + rendering_buffer& rbuf() { return m_ren->rbuf(); } + + private: + base_ren_type* m_ren; + color_type m_fill_color; + color_type m_line_color; + int m_curr_x; + int m_curr_y; + }; + +} + +#endif diff --git a/kiva/markers/agg/agg_rendering_buffer.h b/kiva/markers/agg/agg_rendering_buffer.h new file mode 100644 index 000000000..02b5aed76 --- /dev/null +++ b/kiva/markers/agg/agg_rendering_buffer.h @@ -0,0 +1,300 @@ 
+//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. +// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// class rendering_buffer +// +//---------------------------------------------------------------------------- + +#ifndef AGG_RENDERING_BUFFER_INCLUDED +#define AGG_RENDERING_BUFFER_INCLUDED + +#include "agg_array.h" + +namespace agg24markers +{ + + //===========================================================row_accessor + template class row_accessor + { + public: + typedef const_row_info row_data; + + //------------------------------------------------------------------- + row_accessor() : + m_buf(0), + m_start(0), + m_width(0), + m_height(0), + m_stride(0) + { + } + + //-------------------------------------------------------------------- + row_accessor(T* buf, unsigned width, unsigned height, int stride) : + m_buf(0), + m_start(0), + m_width(0), + m_height(0), + m_stride(0) + { + attach(buf, width, height, stride); + } + + + //-------------------------------------------------------------------- + void attach(T* buf, unsigned width, unsigned height, int stride) + { + m_buf = m_start = buf; + m_width = width; + m_height = height; + m_stride = stride; + if(stride < 0) + { + m_start = m_buf - (AGG_INT64)(height - 1) * stride; + } + } + + //-------------------------------------------------------------------- + AGG_INLINE T* buf() { return m_buf; } + AGG_INLINE const T* buf() const { return m_buf; } + AGG_INLINE unsigned width() const { return m_width; } + AGG_INLINE unsigned height() const { return m_height; } + AGG_INLINE int stride() const { return m_stride; } + AGG_INLINE unsigned stride_abs() const + { + return (m_stride < 0) ? 
unsigned(-m_stride) : unsigned(m_stride); + } + + //-------------------------------------------------------------------- + AGG_INLINE T* row_ptr(int, int y, unsigned) + { + return m_start + y * (AGG_INT64)m_stride; + } + AGG_INLINE T* row_ptr(int y) { return m_start + y * (AGG_INT64)m_stride; } + AGG_INLINE const T* row_ptr(int y) const { return m_start + y * (AGG_INT64)m_stride; } + AGG_INLINE row_data row (int y) const + { + return row_data(0, m_width-1, row_ptr(y)); + } + + //-------------------------------------------------------------------- + template + void copy_from(const RenBuf& src) + { + unsigned h = height(); + if(src.height() < h) h = src.height(); + + unsigned l = stride_abs(); + if(src.stride_abs() < l) l = src.stride_abs(); + + l *= sizeof(T); + + unsigned y; + unsigned w = width(); + for (y = 0; y < h; y++) + { + memcpy(row_ptr(0, y, w), src.row_ptr(y), l); + } + } + + //-------------------------------------------------------------------- + void clear(T value) + { + unsigned y; + unsigned w = width(); + unsigned stride = stride_abs(); + for(y = 0; y < height(); y++) + { + T* p = row_ptr(0, y, w); + unsigned x; + for(x = 0; x < stride; x++) + { + *p++ = value; + } + } + } + + private: + //-------------------------------------------------------------------- + T* m_buf; // Pointer to renrdering buffer + T* m_start; // Pointer to first pixel depending on stride + unsigned m_width; // Width in pixels + unsigned m_height; // Height in pixels + int m_stride; // Number of bytes per row. Can be < 0 + }; + + + + + //==========================================================row_ptr_cache + template class row_ptr_cache + { + public: + typedef const_row_info row_data; + + //------------------------------------------------------------------- + row_ptr_cache() : + m_buf(0), + m_rows(), + m_width(0), + m_height(0), + m_stride(0) + { + } + + //-------------------------------------------------------------------- + row_ptr_cache(T* buf, unsigned width, unsigned height, int stride) : + m_buf(0), + m_rows(), + m_width(0), + m_height(0), + m_stride(0) + { + attach(buf, width, height, stride); + } + + //-------------------------------------------------------------------- + void attach(T* buf, unsigned width, unsigned height, int stride) + { + m_buf = buf; + m_width = width; + m_height = height; + m_stride = stride; + if(height > m_rows.size()) + { + m_rows.resize(height); + } + + T* row_ptr = m_buf; + + if(stride < 0) + { + row_ptr = m_buf - (AGG_INT64)(height - 1) * stride; + } + + T** rows = &m_rows[0]; + + while(height--) + { + *rows++ = row_ptr; + row_ptr += stride; + } + } + + //-------------------------------------------------------------------- + AGG_INLINE T* buf() { return m_buf; } + AGG_INLINE const T* buf() const { return m_buf; } + AGG_INLINE unsigned width() const { return m_width; } + AGG_INLINE unsigned height() const { return m_height; } + AGG_INLINE int stride() const { return m_stride; } + AGG_INLINE unsigned stride_abs() const + { + return (m_stride < 0) ? 
unsigned(-m_stride) : unsigned(m_stride); + } + + //-------------------------------------------------------------------- + AGG_INLINE T* row_ptr(int, int y, unsigned) + { + return m_rows[y]; + } + AGG_INLINE T* row_ptr(int y) { return m_rows[y]; } + AGG_INLINE const T* row_ptr(int y) const { return m_rows[y]; } + AGG_INLINE row_data row (int y) const + { + return row_data(0, m_width-1, m_rows[y]); + } + + //-------------------------------------------------------------------- + T const* const* rows() const { return &m_rows[0]; } + + //-------------------------------------------------------------------- + template + void copy_from(const RenBuf& src) + { + unsigned h = height(); + if(src.height() < h) h = src.height(); + + unsigned l = stride_abs(); + if(src.stride_abs() < l) l = src.stride_abs(); + + l *= sizeof(T); + + unsigned y; + unsigned w = width(); + for (y = 0; y < h; y++) + { + memcpy(row_ptr(0, y, w), src.row_ptr(y), l); + } + } + + //-------------------------------------------------------------------- + void clear(T value) + { + unsigned y; + unsigned w = width(); + unsigned stride = stride_abs(); + for(y = 0; y < height(); y++) + { + T* p = row_ptr(0, y, w); + unsigned x; + for(x = 0; x < stride; x++) + { + *p++ = value; + } + } + } + + private: + //-------------------------------------------------------------------- + T* m_buf; // Pointer to renrdering buffer + pod_array m_rows; // Pointers to each row of the buffer + unsigned m_width; // Width in pixels + unsigned m_height; // Height in pixels + int m_stride; // Number of bytes per row. Can be < 0 + }; + + + + + //========================================================rendering_buffer + // + // The definition of the main type for accessing the rows in the frame + // buffer. It provides functionality to navigate to the rows in a + // rectangular matrix, from top to bottom or from bottom to top depending + // on stride. + // + // row_accessor is cheap to create/destroy, but performs one multiplication + // when calling row_ptr(). + // + // row_ptr_cache creates an array of pointers to rows, so, the access + // via row_ptr() may be faster. But it requires memory allocation + // when creating. For example, on typical Intel Pentium hardware + // row_ptr_cache speeds span_image_filter_rgb_nn up to 10% + // + // It's used only in short hand typedefs like pixfmt_rgba32 and can be + // redefined in agg_config.h + // In real applications you can use both, depending on your needs + //------------------------------------------------------------------------ +#ifdef AGG_RENDERING_BUFFER + typedef AGG_RENDERING_BUFFER rendering_buffer; +#else +// typedef row_ptr_cache rendering_buffer; + typedef row_accessor rendering_buffer; +#endif + +} + + +#endif diff --git a/kiva/markers/agg/agg_trans_affine.h b/kiva/markers/agg/agg_trans_affine.h new file mode 100644 index 000000000..2b9258ceb --- /dev/null +++ b/kiva/markers/agg/agg_trans_affine.h @@ -0,0 +1,518 @@ +//---------------------------------------------------------------------------- +// Anti-Grain Geometry - Version 2.4 +// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) +// +// Permission to copy, use, modify, sell and distribute this software +// is granted provided this copyright notice appears in all copies. +// This software is provided "as is" without express or implied +// warranty, and with no claim as to its suitability for any purpose. 
+// +//---------------------------------------------------------------------------- +// Contact: mcseem@antigrain.com +// mcseemagg@yahoo.com +// http://www.antigrain.com +//---------------------------------------------------------------------------- +// +// Affine transformation classes. +// +//---------------------------------------------------------------------------- +#ifndef AGG_TRANS_AFFINE_INCLUDED +#define AGG_TRANS_AFFINE_INCLUDED + +#include +#include "agg_basics.h" + +namespace agg24markers +{ + const double affine_epsilon = 1e-14; + + //============================================================trans_affine + // + // See Implementation agg_trans_affine.cpp + // + // Affine transformation are linear transformations in Cartesian coordinates + // (strictly speaking not only in Cartesian, but for the beginning we will + // think so). They are rotation, scaling, translation and skewing. + // After any affine transformation a line segment remains a line segment + // and it will never become a curve. + // + // There will be no math about matrix calculations, since it has been + // described many times. Ask yourself a very simple question: + // "why do we need to understand and use some matrix stuff instead of just + // rotating, scaling and so on". The answers are: + // + // 1. Any combination of transformations can be done by only 4 multiplications + // and 4 additions in floating point. + // 2. One matrix transformation is equivalent to the number of consecutive + // discrete transformations, i.e. the matrix "accumulates" all transformations + // in the order of their settings. Suppose we have 4 transformations: + // * rotate by 30 degrees, + // * scale X to 2.0, + // * scale Y to 1.5, + // * move to (100, 100). + // The result will depend on the order of these transformations, + // and the advantage of matrix is that the sequence of discret calls: + // rotate(30), scaleX(2.0), scaleY(1.5), move(100,100) + // will have exactly the same result as the following matrix transformations: + // + // affine_matrix m; + // m *= rotate_matrix(30); + // m *= scaleX_matrix(2.0); + // m *= scaleY_matrix(1.5); + // m *= move_matrix(100,100); + // + // m.transform_my_point_at_last(x, y); + // + // What is the good of it? In real life we will set-up the matrix only once + // and then transform many points, let alone the convenience to set any + // combination of transformations. + // + // So, how to use it? Very easy - literally as it's shown above. Not quite, + // let us write a correct example: + // + // agg24markers::trans_affine m; + // m *= agg24markers::trans_affine_rotation(30.0 * 3.1415926 / 180.0); + // m *= agg24markers::trans_affine_scaling(2.0, 1.5); + // m *= agg24markers::trans_affine_translation(100.0, 100.0); + // m.transform(&x, &y); + // + // The affine matrix is all you need to perform any linear transformation, + // but all transformations have origin point (0,0). 
It means that we need to + // use 2 translations if we want to rotate someting around (100,100): + // + // m *= agg24markers::trans_affine_translation(-100.0, -100.0); // move to (0,0) + // m *= agg24markers::trans_affine_rotation(30.0 * 3.1415926 / 180.0); // rotate + // m *= agg24markers::trans_affine_translation(100.0, 100.0); // move back to (100,100) + //---------------------------------------------------------------------- + struct trans_affine + { + double sx, shy, shx, sy, tx, ty; + + //------------------------------------------ Construction + // Identity matrix + trans_affine() : + sx(1.0), shy(0.0), shx(0.0), sy(1.0), tx(0.0), ty(0.0) + {} + + // Custom matrix. Usually used in derived classes + trans_affine(double v0, double v1, double v2, + double v3, double v4, double v5) : + sx(v0), shy(v1), shx(v2), sy(v3), tx(v4), ty(v5) + {} + + // Custom matrix from m[6] + explicit trans_affine(const double* m) : + sx(m[0]), shy(m[1]), shx(m[2]), sy(m[3]), tx(m[4]), ty(m[5]) + {} + + // Rectangle to a parallelogram. + trans_affine(double x1, double y1, double x2, double y2, + const double* parl) + { + rect_to_parl(x1, y1, x2, y2, parl); + } + + // Parallelogram to a rectangle. + trans_affine(const double* parl, + double x1, double y1, double x2, double y2) + { + parl_to_rect(parl, x1, y1, x2, y2); + } + + // Arbitrary parallelogram transformation. + trans_affine(const double* src, const double* dst) + { + parl_to_parl(src, dst); + } + + //---------------------------------- Parellelogram transformations + // transform a parallelogram to another one. Src and dst are + // pointers to arrays of three points (double[6], x1,y1,...) that + // identify three corners of the parallelograms assuming implicit + // fourth point. The arguments are arrays of double[6] mapped + // to x1,y1, x2,y2, x3,y3 where the coordinates are: + // *-----------------* + // / (x3,y3)/ + // / / + // /(x1,y1) (x2,y2)/ + // *-----------------* + const trans_affine& parl_to_parl(const double* src, + const double* dst); + + const trans_affine& rect_to_parl(double x1, double y1, + double x2, double y2, + const double* parl); + + const trans_affine& parl_to_rect(const double* parl, + double x1, double y1, + double x2, double y2); + + + //------------------------------------------ Operations + // Reset - load an identity matrix + const trans_affine& reset(); + + // Direct transformations operations + const trans_affine& translate(double x, double y); + const trans_affine& rotate(double a); + const trans_affine& scale(double s); + const trans_affine& scale(double x, double y); + + // Multiply matrix to another one + const trans_affine& multiply(const trans_affine& m); + + // Multiply "m" to "this" and assign the result to "this" + const trans_affine& premultiply(const trans_affine& m); + + // Multiply matrix to inverse of another one + const trans_affine& multiply_inv(const trans_affine& m); + + // Multiply inverse of "m" to "this" and assign the result to "this" + const trans_affine& premultiply_inv(const trans_affine& m); + + // Invert matrix. Do not try to invert degenerate matrices, + // there's no check for validity. If you set scale to 0 and + // then try to invert matrix, expect unpredictable result. 
+ const trans_affine& invert(); + + // Mirroring around X + const trans_affine& flip_x(); + + // Mirroring around Y + const trans_affine& flip_y(); + + //------------------------------------------- Load/Store + // Store matrix to an array [6] of double + void store_to(double* m) const + { + *m++ = sx; *m++ = shy; *m++ = shx; *m++ = sy; *m++ = tx; *m++ = ty; + } + + // Load matrix from an array [6] of double + const trans_affine& load_from(const double* m) + { + sx = *m++; shy = *m++; shx = *m++; sy = *m++; tx = *m++; ty = *m++; + return *this; + } + + //------------------------------------------- Operators + + // Multiply the matrix by another one + const trans_affine& operator *= (const trans_affine& m) + { + return multiply(m); + } + + // Multiply the matrix by inverse of another one + const trans_affine& operator /= (const trans_affine& m) + { + return multiply_inv(m); + } + + // Multiply the matrix by another one and return + // the result in a separete matrix. + trans_affine operator * (const trans_affine& m) const + { + return trans_affine(*this).multiply(m); + } + + // Multiply the matrix by inverse of another one + // and return the result in a separete matrix. + trans_affine operator / (const trans_affine& m) const + { + return trans_affine(*this).multiply_inv(m); + } + + // Calculate and return the inverse matrix + trans_affine operator ~ () const + { + trans_affine ret = *this; + return ret.invert(); + } + + // Equal operator with default epsilon + bool operator == (const trans_affine& m) const + { + return is_equal(m, affine_epsilon); + } + + // Not Equal operator with default epsilon + bool operator != (const trans_affine& m) const + { + return !is_equal(m, affine_epsilon); + } + + //-------------------------------------------- Transformations + // Direct transformation of x and y + void transform(double* x, double* y) const; + + // Direct transformation of x and y, 2x2 matrix only, no translation + void transform_2x2(double* x, double* y) const; + + // Inverse transformation of x and y. It works slower than the + // direct transformation. For massive operations it's better to + // invert() the matrix and then use direct transformations. + void inverse_transform(double* x, double* y) const; + + //-------------------------------------------- Auxiliary + // Calculate the determinant of matrix + double determinant() const + { + return sx * sy - shy * shx; + } + + // Calculate the reciprocal of the determinant + double determinant_reciprocal() const + { + return 1.0 / (sx * sy - shy * shx); + } + + // Get the average scale (by X and Y). + // Basically used to calculate the approximation_scale when + // decomposinting curves into line segments. + double scale() const; + + // Check to see if the matrix is not degenerate + bool is_valid(double epsilon = affine_epsilon) const; + + // Check to see if it's an identity matrix + bool is_identity(double epsilon = affine_epsilon) const; + + // Check to see if two matrices are equal + bool is_equal(const trans_affine& m, double epsilon = affine_epsilon) const; + + // Determine the major parameters. Use with caution considering + // possible degenerate cases. 
+ double rotation() const; + void translation(double* dx, double* dy) const; + void scaling(double* x, double* y) const; + void scaling_abs(double* x, double* y) const; + }; + + //------------------------------------------------------------------------ + inline void trans_affine::transform(double* x, double* y) const + { + double tmp = *x; + *x = tmp * sx + *y * shx + tx; + *y = tmp * shy + *y * sy + ty; + } + + //------------------------------------------------------------------------ + inline void trans_affine::transform_2x2(double* x, double* y) const + { + double tmp = *x; + *x = tmp * sx + *y * shx; + *y = tmp * shy + *y * sy; + } + + //------------------------------------------------------------------------ + inline void trans_affine::inverse_transform(double* x, double* y) const + { + double d = determinant_reciprocal(); + double a = (*x - tx) * d; + double b = (*y - ty) * d; + *x = a * sy - b * shx; + *y = b * sx - a * shy; + } + + //------------------------------------------------------------------------ + inline double trans_affine::scale() const + { + double x = 0.707106781 * sx + 0.707106781 * shx; + double y = 0.707106781 * shy + 0.707106781 * sy; + return sqrt(x*x + y*y); + } + + //------------------------------------------------------------------------ + inline const trans_affine& trans_affine::translate(double x, double y) + { + tx += x; + ty += y; + return *this; + } + + //------------------------------------------------------------------------ + inline const trans_affine& trans_affine::rotate(double a) + { + double ca = cos(a); + double sa = sin(a); + double t0 = sx * ca - shy * sa; + double t2 = shx * ca - sy * sa; + double t4 = tx * ca - ty * sa; + shy = sx * sa + shy * ca; + sy = shx * sa + sy * ca; + ty = tx * sa + ty * ca; + sx = t0; + shx = t2; + tx = t4; + return *this; + } + + //------------------------------------------------------------------------ + inline const trans_affine& trans_affine::scale(double x, double y) + { + double mm0 = x; // Possible hint for the optimizer + double mm3 = y; + sx *= mm0; + shx *= mm0; + tx *= mm0; + shy *= mm3; + sy *= mm3; + ty *= mm3; + return *this; + } + + //------------------------------------------------------------------------ + inline const trans_affine& trans_affine::scale(double s) + { + double m = s; // Possible hint for the optimizer + sx *= m; + shx *= m; + tx *= m; + shy *= m; + sy *= m; + ty *= m; + return *this; + } + + //------------------------------------------------------------------------ + inline const trans_affine& trans_affine::premultiply(const trans_affine& m) + { + trans_affine t = m; + return *this = t.multiply(*this); + } + + //------------------------------------------------------------------------ + inline const trans_affine& trans_affine::multiply_inv(const trans_affine& m) + { + trans_affine t = m; + t.invert(); + return multiply(t); + } + + //------------------------------------------------------------------------ + inline const trans_affine& trans_affine::premultiply_inv(const trans_affine& m) + { + trans_affine t = m; + t.invert(); + return *this = t.multiply(*this); + } + + //------------------------------------------------------------------------ + inline void trans_affine::scaling_abs(double* x, double* y) const + { + // Used to calculate scaling coefficients in image resampling. + // When there is considerable shear this method gives us much + // better estimation than just sx, sy. 
+ *x = sqrt(sx * sx + shx * shx); + *y = sqrt(shy * shy + sy * sy); + } + + //====================================================trans_affine_rotation + // Rotation matrix. sin() and cos() are calculated twice for the same angle. + // There's no harm because the performance of sin()/cos() is very good on all + // modern processors. Besides, this operation is not going to be invoked too + // often. + class trans_affine_rotation : public trans_affine + { + public: + trans_affine_rotation(double a) : + trans_affine(cos(a), sin(a), -sin(a), cos(a), 0.0, 0.0) + {} + }; + + //====================================================trans_affine_scaling + // Scaling matrix. x, y - scale coefficients by X and Y respectively + class trans_affine_scaling : public trans_affine + { + public: + trans_affine_scaling(double x, double y) : + trans_affine(x, 0.0, 0.0, y, 0.0, 0.0) + {} + + trans_affine_scaling(double s) : + trans_affine(s, 0.0, 0.0, s, 0.0, 0.0) + {} + }; + + //================================================trans_affine_translation + // Translation matrix + class trans_affine_translation : public trans_affine + { + public: + trans_affine_translation(double x, double y) : + trans_affine(1.0, 0.0, 0.0, 1.0, x, y) + {} + }; + + //====================================================trans_affine_skewing + // Sckewing (shear) matrix + class trans_affine_skewing : public trans_affine + { + public: + trans_affine_skewing(double x, double y) : + trans_affine(1.0, tan(y), tan(x), 1.0, 0.0, 0.0) + {} + }; + + + //===============================================trans_affine_line_segment + // Rotate, Scale and Translate, associating 0...dist with line segment + // x1,y1,x2,y2 + class trans_affine_line_segment : public trans_affine + { + public: + trans_affine_line_segment(double x1, double y1, double x2, double y2, + double dist) + { + double dx = x2 - x1; + double dy = y2 - y1; + if(dist > 0.0) + { + multiply(trans_affine_scaling(sqrt(dx * dx + dy * dy) / dist)); + } + multiply(trans_affine_rotation(atan2(dy, dx))); + multiply(trans_affine_translation(x1, y1)); + } + }; + + + //============================================trans_affine_reflection_unit + // Reflection matrix. Reflect coordinates across the line through + // the origin containing the unit vector (ux, uy). + // Contributed by John Horigan + class trans_affine_reflection_unit : public trans_affine + { + public: + trans_affine_reflection_unit(double ux, double uy) : + trans_affine(2.0 * ux * ux - 1.0, + 2.0 * ux * uy, + 2.0 * ux * uy, + 2.0 * uy * uy - 1.0, + 0.0, 0.0) + {} + }; + + + //=================================================trans_affine_reflection + // Reflection matrix. Reflect coordinates across the line through + // the origin at the angle a or containing the non-unit vector (x, y). + // Contributed by John Horigan + class trans_affine_reflection : public trans_affine_reflection_unit + { + public: + trans_affine_reflection(double a) : + trans_affine_reflection_unit(cos(a), sin(a)) + {} + + + trans_affine_reflection(double x, double y) : + trans_affine_reflection_unit(x / sqrt(x * x + y * y), y / sqrt(x * x + y * y)) + {} + }; + +} + + +#endif + diff --git a/kiva/markers/marker_renderer.h b/kiva/markers/marker_renderer.h new file mode 100644 index 000000000..3c7f2a323 --- /dev/null +++ b/kiva/markers/marker_renderer.h @@ -0,0 +1,130 @@ +// (C) Copyright 2005-2021 Enthought, Inc., Austin, TX +// All rights reserved. 
+// +// This software is provided without warranty under the terms of the BSD +// license included in LICENSE.txt and may be redistributed only under +// the conditions described in the aforementioned license. The license +// is also available online at http://www.enthought.com/licenses/BSD.txt +// +// Thanks for using Enthought open source! +#ifndef KIVA_MARKER_RENDERER_H +#define KIVA_MARKER_RENDERER_H + +#include +#include +#include +#include +#include + +namespace kiva_markers +{ + // This enumeration must match the marker constants in `kiva.constants`! + enum marker_type + { + MARKER_SQUARE = 1, + MARKER_DIAMOND, + MARKER_CIRCLE, + MARKER_CROSSED_CIRCLE, + MARKER_CROSS, + MARKER_TRIANGLE, + MARKER_INVERTED_TRIANGLE, + MARKER_PLUS, + MARKER_DOT, + MARKER_PIXEL, + }; + + class marker_renderer_base + { + public: + virtual ~marker_renderer_base(){} + virtual bool draw_markers(const double* pts, const unsigned Npts, + const unsigned size, const marker_type type, + const double* fill, const double* stroke) = 0; + virtual void transform(const double sx, const double sy, const double shx, + const double shy, const double tx, const double ty) = 0; + }; + + template + class marker_renderer : public marker_renderer_base + { + public: + marker_renderer(unsigned char* buf, + const unsigned width, const unsigned height, + const int stride, const bool bottom_up = false) + : m_renbuf(buf, width, height, bottom_up ? -stride : stride) + , m_pixfmt(m_renbuf) + , m_base_renderer(m_pixfmt) + , m_renderer(m_base_renderer) + {} + + virtual ~marker_renderer() {} + + bool draw_markers(const double* pts, const unsigned Npts, + const unsigned size, const marker_type type, + const double* fill, const double* stroke) + { + // Map from our marker type to the AGG marker type + const agg24markers::marker_e marker = _get_marker_type(type); + if (marker == agg24markers::end_of_markers) return false; + + // Assign fill and line colors + m_renderer.fill_color(agg24markers::rgba(fill[0], fill[1], fill[2], fill[3])); + m_renderer.line_color(agg24markers::rgba(stroke[0], stroke[1], stroke[2], stroke[3])); + + // NOTE: this is the average in X and Y + const double scale = m_transform.scale(); + + // Draw the markers + double mx, my; + for (unsigned i = 0; i < Npts*2; i+=2) + { + mx = pts[i]; + my = pts[i+1]; + m_transform.transform(&mx, &my); + m_renderer.marker(int(mx), int(my), size * scale, marker); + } + + return true; + } + + void transform(const double sx, const double sy, + const double shx, const double shy, + const double tx, const double ty) + { + m_transform.sx = sx; m_transform.sy = sy; + m_transform.shx = shx; m_transform.shy = shy; + m_transform.tx = tx; m_transform.ty = ty; + } + + private: + agg24markers::marker_e _get_marker_type(const marker_type type) const + { + switch (type) + { + case MARKER_SQUARE: return agg24markers::marker_square; + case MARKER_DIAMOND: return agg24markers::marker_diamond; + case MARKER_CIRCLE: return agg24markers::marker_circle; + case MARKER_CROSSED_CIRCLE: return agg24markers::marker_crossed_circle; + case MARKER_CROSS: return agg24markers::marker_x; + case MARKER_TRIANGLE: return agg24markers::marker_triangle_up; + case MARKER_INVERTED_TRIANGLE: return agg24markers::marker_triangle_down; + case MARKER_PLUS: return agg24markers::marker_cross; + case MARKER_DOT: return agg24markers::marker_dot; + case MARKER_PIXEL: return agg24markers::marker_pixel; + } + return agg24markers::end_of_markers; + } + + typedef agg24markers::renderer_base base_renderer_t; + typedef 
agg24markers::renderer_markers renderer_t; + + agg24markers::rendering_buffer m_renbuf; + pixfmt_t m_pixfmt; + base_renderer_t m_base_renderer; + renderer_t m_renderer; + agg24markers::trans_affine m_transform; + }; + +} // namespace kiva_markers + +#endif diff --git a/kiva/tests/test_marker_rendering.py b/kiva/tests/test_marker_rendering.py new file mode 100644 index 000000000..d31870ad0 --- /dev/null +++ b/kiva/tests/test_marker_rendering.py @@ -0,0 +1,142 @@ +# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX +# All rights reserved. +# +# This software is provided without warranty under the terms of the BSD +# license included in LICENSE.txt and may be redistributed only under +# the conditions described in the aforementioned license. The license +# is also available online at http://www.enthought.com/licenses/BSD.txt +# +# Thanks for using Enthought open source! +import contextlib +import unittest + +import numpy as np + +from kiva import constants +from kiva.marker_renderer import MarkerRenderer + + +class TestMarkerDrawing(unittest.TestCase): + @contextlib.contextmanager + def draw_and_check(self, buffer, check): + # Start with a white backgroud + buffer.fill(255) + yield + check(buffer) + + def exercise(self, renderer, buffer, check): + marker_names = ( + "SQUARE_MARKER", + "DIAMOND_MARKER", + "CIRCLE_MARKER", + "CROSSED_CIRCLE_MARKER", + "CROSS_MARKER", + "TRIANGLE_MARKER", + "INVERTED_TRIANGLE_MARKER", + "PLUS_MARKER", + "DOT_MARKER", + "PIXEL_MARKER", + ) + fill = (1.0, 0.0, 0.0, 1.0) + stroke = (0.0, 0.0, 0.0, 1.0) + count = 1000 + points = (np.random.random(size=count) * 300.0) + points = points.reshape(count // 2, 2) + + for name in marker_names: + with self.subTest(msg=name): + with self.draw_and_check(buffer, check): + marker = getattr(constants, name) + retval = renderer.draw_markers( + points, 5, marker, fill, stroke + ) + self.assertTrue(retval) + + def test_msb_alpha_32_bit(self): + pixel_formats = ("abgr32", "argb32") + + def check(image): + # Default is expected to be a totally white image. + # Therefore we check if the whole image is white. + if np.sum(image == [255, 255, 255, 255]) == (300 * 300 * 4): + self.fail("The image looks empty, no pixels were drawn") + + buffer = np.empty((300, 300, 4), dtype=np.uint8) + for pix_format in pixel_formats: + gc = MarkerRenderer(buffer, pix_format=pix_format) + self.exercise(gc, buffer, check) + + def test_lsb_alpha_32_bit(self): + pixel_formats = ("bgra32", "rgba32") + + def check(image): + # Default is expected to be a totally white image. + # Therefore we check if the whole image is white. + if np.sum(image == [255, 255, 255, 255]) == (300 * 300 * 4): + self.fail("The image looks empty, no pixels were drawn") + + buffer = np.empty((300, 300, 4), dtype=np.uint8) + for pix_format in pixel_formats: + gc = MarkerRenderer(buffer, pix_format=pix_format) + self.exercise(gc, buffer, check) + + def test_no_alpha_24_bit(self): + pixel_formats = ("bgr24", "rgb24") + + def check(image): + # Default is expected to be a totally white image. + # Therefore we check if the whole image is white. 
+ if np.sum(image == [255, 255, 255]) == (300 * 300 * 3): + self.fail("The image looks empty, no pixels were drawn") + + buffer = np.empty((300, 300, 3), dtype=np.uint8) + for pix_format in pixel_formats: + gc = MarkerRenderer(buffer, pix_format=pix_format) + self.exercise(gc, buffer, check) + + def test_transformation(self): + fill = (1.0, 0.0, 0.0, 1.0) + stroke = (0.0, 0.0, 0.0, 1.0) + buffer = np.empty((100, 100, 3), dtype=np.uint8) + gc = MarkerRenderer(buffer, pix_format="rgb24") + + # Translate past the bounds + gc.transform(1.0, 1.0, 0.0, 0.0, 110, 110) + points = np.array([[0.0, 0.0]]) + buffer.fill(255) + gc.draw_markers(points, 5, constants.SQUARE_MARKER, fill, stroke) + # Transformed the point _out_ of the bounds. We expect nothing drawn + all_white = (np.sum(buffer == [255, 255, 255]) == buffer.size) + self.assertTrue(all_white) + + # Scale past the bounds + gc.transform(2.0, 2.0, 0.0, 0.0, 0.0, 0.0) + points = np.array([[90.0, 90.0]]) + gc.draw_markers(points, 5, constants.SQUARE_MARKER, fill, stroke) + # Transformed the point _out_ of the bounds. We expect nothing drawn + all_white = (np.sum(buffer == [255, 255, 255]) == buffer.size) + self.assertTrue(all_white) + + def test_bad_arguments(self): + fill = (1.0, 0.0, 0.0, 1.0) + stroke = (0.0, 0.0, 0.0, 1.0) + points = np.array([[1.0, 10.0], [50.0, 50.0], [42.0, 24.0]]) + buffer = np.empty((100, 100, 3), dtype=np.uint8) + gc = MarkerRenderer(buffer, pix_format="rgb24") + + # Input array shape checking + with self.assertRaises(ValueError): + gc.draw_markers(fill, 5, constants.PLUS_MARKER, fill, stroke) + with self.assertRaises(ValueError): + gc.draw_markers(points, 5, constants.PLUS_MARKER, fill[:2], stroke) + with self.assertRaises(ValueError): + gc.draw_markers(points, 5, constants.PLUS_MARKER, fill, stroke[:2]) + + # Argument type coercions + with self.assertRaises(TypeError): + gc.draw_markers(points, 5, "plus", fill, stroke) + with self.assertRaises(TypeError): + gc.draw_markers(points, [5], constants.PLUS_MARKER, fill, stroke) + + # Finally, check that drawing a bad marker ID returns False + self.assertFalse(gc.draw_markers(points, 5, 500, fill, stroke)) diff --git a/setup.py b/setup.py index 455d9d92e..fa42f22e3 100644 --- a/setup.py +++ b/setup.py @@ -292,6 +292,22 @@ def base_extensions(): include_dirs=['kiva', numpy.get_include()], language='c++', ), + Extension( + 'kiva._marker_renderer', + sources=['kiva/_marker_renderer.pyx'], + depends=[ + 'kiva/_marker_renderer.pxd', + ], + include_dirs=[ + os.path.join('kiva', 'markers', 'agg'), + os.path.join('kiva', 'markers'), + numpy.get_include(), + ], + define_macros=[ + ("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"), + ], + language='c++', + ), ]
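For reference, here is a minimal usage sketch of the Python-level API that the new tests exercise. It is not part of the patch; it only uses names that appear above (MarkerRenderer, kiva.constants marker IDs, transform(), draw_markers()), and the buffer shape, colors, and point values are illustrative.

    import numpy as np

    from kiva import constants
    from kiva.marker_renderer import MarkerRenderer

    # The caller owns the pixel buffer; the renderer draws directly into it.
    buffer = np.empty((300, 300, 3), dtype=np.uint8)
    buffer.fill(255)  # start from a white background

    gc = MarkerRenderer(buffer, pix_format="rgb24")

    # Affine transform passed as (sx, sy, shx, shy, tx, ty), matching the
    # C++ marker_renderer::transform() signature: identity scale with a
    # translation of (10, 10).
    gc.transform(1.0, 1.0, 0.0, 0.0, 10.0, 10.0)

    # N x 2 array of marker centers; fill and stroke are RGBA tuples of
    # floats in the range 0..1.
    points = np.array([[20.0, 20.0], [150.0, 150.0], [280.0, 280.0]])
    fill = (1.0, 0.0, 0.0, 1.0)
    stroke = (0.0, 0.0, 0.0, 1.0)

    # Returns True on success and False for an unrecognized marker ID.
    ok = gc.draw_markers(points, 5, constants.CIRCLE_MARKER, fill, stroke)
    assert ok

Note that, as marker_renderer.h shows, draw_markers multiplies the requested size by the average scale of the current transform before rasterizing, so scaling the transform enlarges the markers as well as moving their centers.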