From 918e480c6673813e8e537b39fac7800ccc3afbd0 Mon Sep 17 00:00:00 2001 From: VIFEX Date: Thu, 8 Jan 2026 22:23:34 +0800 Subject: [PATCH] feat(draw): add nanovg rendering backend (#8865) Signed-off-by: pengyiqiang --- Kconfig | 36 + docs/src/integration/embedded_linux/index.rst | 1 + .../src/integration/embedded_linux/nanovg.rst | 131 + .../src/integration/embedded_linux/opengl.rst | 9 + lv_conf_template.h | 23 + scripts/lv_conf_internal_gen.py | 5 + src/core/lv_refr.c | 1 + src/draw/lv_draw.c | 2 + src/draw/lv_draw_vector.c | 4 +- src/draw/nanovg/lv_draw_nanovg.c | 464 +++ src/draw/nanovg/lv_draw_nanovg.h | 48 + src/draw/nanovg/lv_draw_nanovg_arc.c | 177 ++ src/draw/nanovg/lv_draw_nanovg_border.c | 383 +++ src/draw/nanovg/lv_draw_nanovg_box_shadow.c | 92 + src/draw/nanovg/lv_draw_nanovg_fill.c | 77 + src/draw/nanovg/lv_draw_nanovg_grad.c | 188 ++ src/draw/nanovg/lv_draw_nanovg_image.c | 229 ++ src/draw/nanovg/lv_draw_nanovg_label.c | 384 +++ src/draw/nanovg/lv_draw_nanovg_layer.c | 74 + src/draw/nanovg/lv_draw_nanovg_line.c | 191 ++ src/draw/nanovg/lv_draw_nanovg_mask_rect.c | 81 + src/draw/nanovg/lv_draw_nanovg_private.h | 249 ++ src/draw/nanovg/lv_draw_nanovg_triangle.c | 85 + src/draw/nanovg/lv_draw_nanovg_vector.c | 312 ++ src/draw/nanovg/lv_nanovg_fbo_cache.c | 176 ++ src/draw/nanovg/lv_nanovg_fbo_cache.h | 85 + src/draw/nanovg/lv_nanovg_image_cache.c | 354 +++ src/draw/nanovg/lv_nanovg_image_cache.h | 81 + src/draw/nanovg/lv_nanovg_math.h | 102 + src/draw/nanovg/lv_nanovg_utils.c | 301 ++ src/draw/nanovg/lv_nanovg_utils.h | 189 ++ src/draw/snapshot/lv_snapshot.c | 5 + src/libs/nanovg/LICENSE.txt | 18 + src/libs/nanovg/nanovg.c | 2577 +++++++++++++++++ src/libs/nanovg/nanovg.h | 707 +++++ src/libs/nanovg/nanovg_gl.h | 1842 ++++++++++++ src/libs/nanovg/nanovg_gl_utils.h | 167 ++ src/lv_conf_internal.h | 58 + src/lv_conf_kconfig.h | 14 + src/misc/cache/instance/lv_image_cache.c | 3 + src/misc/lv_pending.c | 117 + src/misc/lv_pending.h | 85 + 
src/widgets/canvas/lv_canvas.c | 10 +- tests/src/lv_test_conf_full.h | 3 + 44 files changed, 10137 insertions(+), 3 deletions(-) create mode 100644 docs/src/integration/embedded_linux/nanovg.rst create mode 100644 src/draw/nanovg/lv_draw_nanovg.c create mode 100644 src/draw/nanovg/lv_draw_nanovg.h create mode 100644 src/draw/nanovg/lv_draw_nanovg_arc.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_border.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_box_shadow.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_fill.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_grad.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_image.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_label.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_layer.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_line.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_mask_rect.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_private.h create mode 100644 src/draw/nanovg/lv_draw_nanovg_triangle.c create mode 100644 src/draw/nanovg/lv_draw_nanovg_vector.c create mode 100644 src/draw/nanovg/lv_nanovg_fbo_cache.c create mode 100644 src/draw/nanovg/lv_nanovg_fbo_cache.h create mode 100644 src/draw/nanovg/lv_nanovg_image_cache.c create mode 100644 src/draw/nanovg/lv_nanovg_image_cache.h create mode 100644 src/draw/nanovg/lv_nanovg_math.h create mode 100644 src/draw/nanovg/lv_nanovg_utils.c create mode 100644 src/draw/nanovg/lv_nanovg_utils.h create mode 100644 src/libs/nanovg/LICENSE.txt create mode 100644 src/libs/nanovg/nanovg.c create mode 100644 src/libs/nanovg/nanovg.h create mode 100644 src/libs/nanovg/nanovg_gl.h create mode 100644 src/libs/nanovg/nanovg_gl_utils.h create mode 100644 src/misc/lv_pending.c create mode 100644 src/misc/lv_pending.h diff --git a/Kconfig b/Kconfig index 40bd102144..621f1a15fc 100644 --- a/Kconfig +++ b/Kconfig @@ -568,6 +568,38 @@ menu "LVGL configuration" default 2048 depends on LV_USE_DRAW_EVE + config LV_USE_DRAW_NANOVG + bool "Use NanoVG 
Renderer." + default n + depends on LV_USE_NANOVG && LV_USE_MATRIX + + choice LV_NANOVG_BACKEND + prompt "NanoVG OpenGL backend" + default LV_NANOVG_BACKEND_GLES2 + depends on LV_USE_DRAW_NANOVG + help + Select which OpenGL implementation to use for NanoVG rendering. + + config LV_NANOVG_BACKEND_GL2 + bool "OpenGL 2.0" + config LV_NANOVG_BACKEND_GL3 + bool "OpenGL 3.0+" + config LV_NANOVG_BACKEND_GLES2 + bool "OpenGL ES 2.0" + config LV_NANOVG_BACKEND_GLES3 + bool "OpenGL ES 3.0+" + endchoice + + config LV_NANOVG_IMAGE_CACHE_CNT + int "Draw image texture cache count" + default 128 + depends on LV_USE_DRAW_NANOVG + + config LV_NANOVG_LETTER_CACHE_CNT + int "Draw letter cache count" + default 512 + depends on LV_USE_DRAW_NANOVG + endmenu menu "Feature Configuration" @@ -1545,6 +1577,10 @@ menu "LVGL configuration" bool "Use ThorVG external" endchoice + config LV_USE_NANOVG + bool "NanoVG library" + default n + config LV_USE_LZ4 bool "Enable LZ4 compress/decompress lib" choice diff --git a/docs/src/integration/embedded_linux/index.rst b/docs/src/integration/embedded_linux/index.rst index 463727f968..80169c7c9a 100644 --- a/docs/src/integration/embedded_linux/index.rst +++ b/docs/src/integration/embedded_linux/index.rst @@ -9,5 +9,6 @@ Running under Embedded Linux overview opengl + nanovg os/index drivers/index diff --git a/docs/src/integration/embedded_linux/nanovg.rst b/docs/src/integration/embedded_linux/nanovg.rst new file mode 100644 index 0000000000..3712238cb0 --- /dev/null +++ b/docs/src/integration/embedded_linux/nanovg.rst @@ -0,0 +1,131 @@ +.. _nanovg_draw_unit: + +================ +NanoVG Draw Unit +================ + +Introduction +============ + +NanoVG is a lightweight, antialiased 2D vector graphics library built on top of OpenGL/OpenGL ES. +The NanoVG draw unit integrates NanoVG as a hardware-accelerated rendering backend for LVGL, +providing GPU-accelerated drawing for all standard LVGL widgets and graphics primitives. 
+ +Unlike the software renderer, NanoVG leverages the GPU for: + +- Antialiased path rendering (rectangles, arcs, lines, triangles) +- Hardware-accelerated image compositing with rotation and scaling +- Efficient text rendering with font texture caching +- Box shadows and gradients +- Vector graphics support + +Requirements +============ + +- OpenGL 2.0+ / OpenGL ES 2.0+ / OpenGL ES 3.0+ +- An initialized OpenGL context (via GLFW, EGL, or custom setup) +- Stencil buffer support (8-bit recommended) + +Configuration +============= + +Enable the NanoVG draw unit in ``lv_conf.h``: + +.. code-block:: c + + /* Enable NanoVG library */ + #define LV_USE_NANOVG 1 + + /* Enable NanoVG draw unit */ + #define LV_USE_DRAW_NANOVG 1 + + /* Select OpenGL backend (choose one): + * - LV_NANOVG_BACKEND_GL2: OpenGL 2.0 + * - LV_NANOVG_BACKEND_GL3: OpenGL 3.0+ + * - LV_NANOVG_BACKEND_GLES2: OpenGL ES 2.0 + * - LV_NANOVG_BACKEND_GLES3: OpenGL ES 3.0+ + */ + #define LV_NANOVG_BACKEND LV_NANOVG_BACKEND_GLES2 + + /* Optional: Adjust cache sizes */ + #define LV_NANOVG_IMAGE_CACHE_CNT 32 /* Image texture cache entries */ + #define LV_NANOVG_FBO_CACHE_CNT 8 /* Framebuffer object cache entries */ + +Supported Features +================== + +The NanoVG draw unit supports all standard LVGL drawing operations: + +.. 
list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Feature + - Description + * - Fill + - Solid colors, gradients (linear/radial) + * - Border + - Rounded rectangles with customizable width + * - Box Shadow + - Hardware-accelerated shadow rendering + * - Images + - Rotation, scaling, tiling, recoloring + * - Labels + - Font rendering with texture atlas caching + * - Lines + - Antialiased lines with configurable width + * - Arcs + - Antialiased arc segments + * - Triangles + - Filled triangles + * - Masks + - Rectangle masks for clipping + * - Layers + - Off-screen rendering with FBO + * - Canvas + - Direct drawing to canvas buffers + * - Vector Graphics + - SVG-style path rendering (requires ``LV_USE_VECTOR_GRAPHIC``) + +Supported Image Formats +======================= + +NanoVG supports zero-copy texture upload for these LVGL color formats: + +.. list-table:: + :header-rows: 1 + :widths: 25 25 50 + + * - LVGL Format + - GL Processing + - Notes + * - ``LV_COLOR_FORMAT_A8`` + - Alpha texture + - Color tinting via shader + * - ``LV_COLOR_FORMAT_ARGB8888`` + - BGR→RGB swizzle + - Premultiplication handled in shader + * - ``LV_COLOR_FORMAT_XRGB8888`` + - BGR→RGB + alpha=1 + - X channel ignored + * - ``LV_COLOR_FORMAT_RGB888`` + - BGR→RGB swizzle + - No alpha channel + * - ``LV_COLOR_FORMAT_RGB565`` + - Direct upload + - Note: LVGL uses BGR565 layout + +Performance Tips +================ + +1. **Minimize Layer Usage**: Each layer requires a framebuffer object (FBO) switch +2. **Use Premultiplied Alpha**: Set ``LV_IMAGE_FLAGS_PREMULTIPLIED`` for pre-processed images +3. **Cache Static Content**: NanoVG caches textures automatically; avoid recreating images +4. **Batch Similar Operations**: Group widgets with similar styles for better GPU batching + +Limitations +=========== + +- **Blur**: Not natively supported; Using this style will not affect the rendering results. 
+- **Complex Gradients**: Limited to 2-color gradients (LVGL supports multi-stop) +- **Layer Readback**: ``glReadPixels`` for canvas/layer is relatively slow diff --git a/docs/src/integration/embedded_linux/opengl.rst b/docs/src/integration/embedded_linux/opengl.rst index cc4c629ce6..dc51dd8fab 100644 --- a/docs/src/integration/embedded_linux/opengl.rst +++ b/docs/src/integration/embedded_linux/opengl.rst @@ -79,3 +79,12 @@ animations, and interactive camera controls for embedded 3D visualization. For complete implementation details, see :ref:`glTF `. +NanoVG Draw Unit +================ + +The NanoVG draw unit provides a hardware-accelerated 2D vector graphics rendering backend for LVGL. +It leverages GPU capabilities for antialiased path rendering, efficient image compositing, and text rendering. + +For complete implementation details, see :ref:`NanoVG Draw Unit `. + + diff --git a/lv_conf_template.h b/lv_conf_template.h index 0d57fdb6f6..282d58630f 100644 --- a/lv_conf_template.h +++ b/lv_conf_template.h @@ -410,6 +410,26 @@ #define LV_DRAW_EVE_WRITE_BUFFER_SIZE 2048 #endif +/** Use NanoVG Renderer + * - Requires LV_USE_NANOVG, LV_USE_MATRIX. + */ +#define LV_USE_DRAW_NANOVG 0 +#if LV_USE_DRAW_NANOVG + /** Select OpenGL backend for NanoVG: + * - LV_NANOVG_BACKEND_GL2: OpenGL 2.0 + * - LV_NANOVG_BACKEND_GL3: OpenGL 3.0+ + * - LV_NANOVG_BACKEND_GLES2: OpenGL ES 2.0 + * - LV_NANOVG_BACKEND_GLES3: OpenGL ES 3.0+ + */ + #define LV_NANOVG_BACKEND LV_NANOVG_BACKEND_GLES2 + + /** Draw image texture cache count. */ + #define LV_NANOVG_IMAGE_CACHE_CNT 128 + + /** Draw letter texture cache count. 
*/ + #define LV_NANOVG_LETTER_CACHE_CNT 512 +#endif + /*======================= * FEATURE CONFIGURATION *=======================*/ @@ -1022,6 +1042,9 @@ * Requires LV_USE_VECTOR_GRAPHIC */ #define LV_USE_THORVG_EXTERNAL 0 +/** Enable NanoVG (vector graphics library) */ +#define LV_USE_NANOVG 0 + /** Use lvgl built-in LZ4 lib */ #define LV_USE_LZ4_INTERNAL 0 diff --git a/scripts/lv_conf_internal_gen.py b/scripts/lv_conf_internal_gen.py index 01de4dba64..48619e2cc8 100755 --- a/scripts/lv_conf_internal_gen.py +++ b/scripts/lv_conf_internal_gen.py @@ -71,6 +71,11 @@ fout.write( #define LV_NEMA_HAL_CUSTOM 0 #define LV_NEMA_HAL_STM32 1 +#define LV_NANOVG_BACKEND_GL2 1 +#define LV_NANOVG_BACKEND_GL3 2 +#define LV_NANOVG_BACKEND_GLES2 3 +#define LV_NANOVG_BACKEND_GLES3 4 + /** Handle special Kconfig options. */ #ifndef LV_KCONFIG_IGNORE #include "lv_conf_kconfig.h" diff --git a/src/core/lv_refr.c b/src/core/lv_refr.c index a96576eee3..e894e2f3bc 100644 --- a/src/core/lv_refr.c +++ b/src/core/lv_refr.c @@ -951,6 +951,7 @@ static void refr_area(const lv_area_t * area_p, int32_t y_offset) layer_i = layer_i->next; } + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_DELETED, tile_layer); if(disp_refr->layer_deinit) disp_refr->layer_deinit(disp_refr, tile_layer); } lv_free(tile_layers); diff --git a/src/draw/lv_draw.c b/src/draw/lv_draw.c index fc6fdd3338..eaf651a548 100644 --- a/src/draw/lv_draw.c +++ b/src/draw/lv_draw.c @@ -462,6 +462,7 @@ void lv_draw_layer_init(lv_layer_t * layer, lv_layer_t * parent_layer, lv_color_ layer->color_format = color_format; if(disp->layer_init) disp->layer_init(disp, layer); + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_CREATED, layer); if(disp->layer_head) { lv_layer_t * tail = disp->layer_head; @@ -669,6 +670,7 @@ static void cleanup_task(lv_draw_task_t * t, lv_display_t * disp) l2 = l2->next; } + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_DELETED, layer_drawn); if(disp->layer_deinit) { LV_PROFILER_DRAW_BEGIN_TAG("layer_deinit"); 
disp->layer_deinit(disp, layer_drawn); diff --git a/src/draw/lv_draw_vector.c b/src/draw/lv_draw_vector.c index 089554dedb..daa1606909 100644 --- a/src/draw/lv_draw_vector.c +++ b/src/draw/lv_draw_vector.c @@ -12,8 +12,8 @@ #if LV_USE_VECTOR_GRAPHIC -#if !((LV_USE_DRAW_SW && LV_USE_THORVG) || LV_USE_DRAW_VG_LITE || (LV_USE_NEMA_GFX && LV_USE_NEMA_VG)) - #error "LV_USE_VECTOR_GRAPHIC requires (LV_USE_DRAW_SW and LV_USE_THORVG) or LV_USE_DRAW_VG_LITE or (LV_USE_NEMA_GFX and LV_USE_NEMA_VG)" +#if !((LV_USE_DRAW_SW && LV_USE_THORVG) || LV_USE_DRAW_VG_LITE || (LV_USE_NEMA_GFX && LV_USE_NEMA_VG) || LV_USE_DRAW_NANOVG) + #error "LV_USE_VECTOR_GRAPHIC requires (LV_USE_DRAW_SW and LV_USE_THORVG) or LV_USE_DRAW_VG_LITE or (LV_USE_NEMA_GFX and LV_USE_NEMA_VG) or LV_USE_DRAW_NANOVG" #endif #include "../misc/lv_ll.h" diff --git a/src/draw/nanovg/lv_draw_nanovg.c b/src/draw/nanovg/lv_draw_nanovg.c new file mode 100644 index 0000000000..5bfa05109e --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg.c @@ -0,0 +1,464 @@ +/** + * @file lv_draw_nanovg.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg.h" + +#if LV_USE_DRAW_NANOVG + +#include "../../display/lv_display.h" +#include "../../core/lv_refr_private.h" +#include "lv_draw_nanovg_private.h" +#include "lv_nanovg_utils.h" +#include "lv_nanovg_image_cache.h" +#include "lv_nanovg_fbo_cache.h" + +#if LV_USE_OPENGLES && LV_USE_EGL + #include "../../drivers/opengles/lv_opengles_private.h" +#else + #define NANOVG_GL_STATIC_LINK +#endif + +#if defined(NANOVG_GL2_IMPLEMENTATION) + #ifdef NANOVG_GL_STATIC_LINK + #include + #endif + #define NVG_CTX_CREATE nvgCreateGL2 + #define NVG_CTX_DELETE nvgDeleteGL2 +#elif defined(NANOVG_GL3_IMPLEMENTATION) + #ifdef NANOVG_GL_STATIC_LINK + #include + #endif + #define NVG_CTX_CREATE nvgCreateGL3 + #define NVG_CTX_DELETE nvgDeleteGL3 +#elif defined(NANOVG_GLES2_IMPLEMENTATION) + #ifdef NANOVG_GL_STATIC_LINK + #include + #endif + #define NVG_CTX_CREATE 
nvgCreateGLES2 + #define NVG_CTX_DELETE nvgDeleteGLES2 +#elif defined(NANOVG_GLES3_IMPLEMENTATION) + #ifdef NANOVG_GL_STATIC_LINK + #include + #endif + #define NVG_CTX_CREATE nvgCreateGLES3 + #define NVG_CTX_DELETE nvgDeleteGLES3 +#else + #error "No NanoVG implementation defined" +#endif + +#include "../../libs/nanovg/nanovg_gl.h" +#include "../../libs/nanovg/nanovg_gl_utils.h" + +/* GL_BGRA may not be defined on all platforms */ +#ifndef GL_BGRA + #ifdef GL_BGRA_EXT + #define GL_BGRA GL_BGRA_EXT + #else + #define GL_BGRA 0x80E1 + #endif +#endif + +/********************* + * DEFINES + *********************/ + +#define NANOVG_DRAW_UNIT_ID 10 + +/********************** + * TYPEDEFS + **********************/ + +/********************** + * STATIC PROTOTYPES + **********************/ + +static int32_t draw_dispatch(lv_draw_unit_t * draw_unit, lv_layer_t * layer); +static int32_t draw_evaluate(lv_draw_unit_t * draw_unit, lv_draw_task_t * task); +static int32_t draw_delete(lv_draw_unit_t * draw_unit); +static void draw_event_cb(lv_event_t * e); + +/********************** + * STATIC VARIABLES + **********************/ + +/********************** + * MACROS + **********************/ + +/********************** + * GLOBAL FUNCTIONS + **********************/ + +void lv_draw_nanovg_init(void) +{ + lv_display_render_mode_t mode = lv_display_get_render_mode(NULL); + if(mode != LV_DISPLAY_RENDER_MODE_FULL) { + LV_LOG_ERROR("Detect render mode(%d) is not FULL. 
The rendering result may be incorrect.", mode); + } + + static bool initialized = false; + if(initialized) return; + initialized = true; + + lv_draw_nanovg_unit_t * unit = lv_draw_create_unit(sizeof(lv_draw_nanovg_unit_t)); + unit->base_unit.dispatch_cb = draw_dispatch; + unit->base_unit.evaluate_cb = draw_evaluate; + unit->base_unit.delete_cb = draw_delete; + unit->base_unit.event_cb = draw_event_cb; + unit->base_unit.name = "NANOVG"; + + unit->vg = NVG_CTX_CREATE(0); + LV_ASSERT_MSG(unit->vg != NULL, "NanoVG init failed"); + + lv_nanovg_utils_init(unit); + lv_nanovg_image_cache_init(unit); + lv_nanovg_fbo_cache_init(unit); + lv_draw_nanovg_label_init(unit); +} + +int lv_nanovg_fb_get_image_handle(struct NVGLUframebuffer * fb) +{ + LV_ASSERT_NULL(fb); + return fb->image; +} + +/********************** + * STATIC FUNCTIONS + **********************/ + +static void draw_execute(lv_draw_nanovg_unit_t * u, lv_draw_task_t * t) +{ + /* remember draw unit for access to unit's context */ + t->draw_unit = (lv_draw_unit_t *)u; + lv_layer_t * layer = t->target_layer; + + lv_matrix_t global_matrix; + lv_matrix_identity(&global_matrix); + if(layer->buf_area.x1 || layer->buf_area.y1) { + lv_matrix_translate(&global_matrix, -layer->buf_area.x1, -layer->buf_area.y1); + } + +#if LV_DRAW_TRANSFORM_USE_MATRIX + lv_matrix_t layer_matrix = t->matrix; + lv_matrix_multiply(&global_matrix, &layer_matrix); +#endif + + /* NanoVG will output premultiplied image, set the flag correspondingly. 
*/ + if(layer->draw_buf) { + lv_draw_buf_set_flag(layer->draw_buf, LV_IMAGE_FLAGS_PREMULTIPLIED); + } + + nvgReset(u->vg); + lv_nanovg_transform(u->vg, &global_matrix); + + lv_nanovg_set_clip_area(u->vg, &t->clip_area); + + switch(t->type) { + case LV_DRAW_TASK_TYPE_FILL: + lv_draw_nanovg_fill(t, t->draw_dsc, &t->area); + break; + + case LV_DRAW_TASK_TYPE_BORDER: + lv_draw_nanovg_border(t, t->draw_dsc, &t->area); + break; + + case LV_DRAW_TASK_TYPE_BOX_SHADOW: + lv_draw_nanovg_box_shadow(t, t->draw_dsc, &t->area); + break; + + case LV_DRAW_TASK_TYPE_LETTER: + lv_draw_nanovg_letter(t, t->draw_dsc, &t->area); + break; + + case LV_DRAW_TASK_TYPE_LABEL: + lv_draw_nanovg_label(t, t->draw_dsc, &t->area); + break; + + case LV_DRAW_TASK_TYPE_IMAGE: + lv_draw_nanovg_image(t, t->draw_dsc, &t->area, -1); + break; + + case LV_DRAW_TASK_TYPE_LAYER: + lv_draw_nanovg_layer(t, t->draw_dsc, &t->area); + break; + + case LV_DRAW_TASK_TYPE_LINE: + lv_draw_nanovg_line(t, t->draw_dsc); + break; + + case LV_DRAW_TASK_TYPE_ARC: + lv_draw_nanovg_arc(t, t->draw_dsc, &t->area); + break; + + case LV_DRAW_TASK_TYPE_TRIANGLE: + lv_draw_nanovg_triangle(t, t->draw_dsc); + break; + + case LV_DRAW_TASK_TYPE_MASK_RECTANGLE: + lv_draw_nanovg_mask_rect(t, t->draw_dsc); + break; + +#if LV_USE_VECTOR_GRAPHIC + case LV_DRAW_TASK_TYPE_VECTOR: + lv_draw_nanovg_vector(t, t->draw_dsc); + break; +#endif + default: + LV_LOG_ERROR("unknown draw task type: %d", t->type); + break; + } +} + +static void on_layer_changed(lv_layer_t * new_layer) +{ + LV_PROFILER_DRAW_BEGIN; + + if(!new_layer->user_data) { + /* Bind the default framebuffer for normal rendering */ + nvgluBindFramebuffer(NULL); + LV_PROFILER_DRAW_END; + return; + } + + LV_PROFILER_BEGIN_TAG("nvgBindFramebuffer"); + nvgluBindFramebuffer(lv_nanovg_fbo_cache_entry_to_fb(new_layer->user_data)); + LV_PROFILER_END_TAG("nvgBindFramebuffer"); + + /* Clear the off-screen framebuffer */ + LV_PROFILER_DRAW_BEGIN_TAG("glClear"); + glClearColor(0, 0, 0, 0); + 
glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); + LV_PROFILER_DRAW_END_TAG("glClear"); + + LV_PROFILER_DRAW_END; +} + +static void on_layer_readback(lv_draw_nanovg_unit_t * u, lv_layer_t * layer) +{ + LV_PROFILER_DRAW_BEGIN; + LV_ASSERT_NULL(u); + LV_ASSERT_NULL(layer); + + lv_cache_entry_t * entry = layer->user_data; + + if(!entry) { + LV_LOG_WARN("No entry available for layer: %p", (void *)layer); + LV_PROFILER_DRAW_END; + return; + } + + if(!layer->draw_buf) { + LV_LOG_WARN("No draw buffer available for layer: %p", (void *)layer); + LV_PROFILER_DRAW_END; + return; + } + + struct NVGLUframebuffer * fb = lv_nanovg_fbo_cache_entry_to_fb(entry); + if(!fb) { + LV_LOG_ERROR("No framebuffer available for layer: %p", (void *)layer); + LV_PROFILER_DRAW_END; + return; + } + + /* Bind the FBO for reading */ + nvgluBindFramebuffer(fb); + + int32_t w = lv_area_get_width(&layer->buf_area); + int32_t h = lv_area_get_height(&layer->buf_area); + lv_draw_buf_t * draw_buf = layer->draw_buf; + + /* Read pixels from FBO */ + GLenum format; + GLenum type; + + /* OpenGL reads bottom-to-top, but LVGL expects top-to-bottom */ + switch(draw_buf->header.cf) { + case LV_COLOR_FORMAT_ARGB8888: + case LV_COLOR_FORMAT_XRGB8888: + case LV_COLOR_FORMAT_ARGB8888_PREMULTIPLIED: + format = GL_BGRA; + type = GL_UNSIGNED_BYTE; + break; + + case LV_COLOR_FORMAT_RGB888: + format = GL_RGB; + type = GL_UNSIGNED_BYTE; + break; + + case LV_COLOR_FORMAT_RGB565: + format = GL_RGB; + type = GL_UNSIGNED_SHORT_5_6_5; + break; + + default: + LV_LOG_WARN("Unsupported color format: %d", draw_buf->header.cf); + LV_PROFILER_DRAW_END; + return; + } + + for(int32_t y = 0; y < h; y++) { + /* Reverse Y coordinate */ + void * row = lv_draw_buf_goto_xy(draw_buf, 0, h - 1 - y); + LV_PROFILER_DRAW_BEGIN_TAG("glReadPixels"); + glReadPixels(0, y, w, 1, format, type, row); + LV_PROFILER_DRAW_END_TAG("glReadPixels"); + + if(draw_buf->header.cf == LV_COLOR_FORMAT_RGB888) { + /* Swizzle RGB -> BGR */ + lv_color_t * px = 
row; + for(int32_t x = 0; x < w; x++) { + uint8_t r = px->blue; + px->blue = px->red; + px->red = r; + px++; + } + } + } + + /* Bind back to default framebuffer */ + nvgluBindFramebuffer(NULL); + + /* Mark draw_buf as modified */ + lv_draw_buf_flush_cache(draw_buf, NULL); + + LV_PROFILER_DRAW_END; +} + +static int32_t draw_dispatch(lv_draw_unit_t * draw_unit, lv_layer_t * layer) +{ + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)draw_unit; + + lv_draw_task_t * t = lv_draw_get_available_task(layer, NULL, NANOVG_DRAW_UNIT_ID); + if(!t || t->preferred_draw_unit_id != NANOVG_DRAW_UNIT_ID) { + lv_nanovg_end_frame(u); + return LV_DRAW_UNIT_IDLE; + } + + if(u->current_layer != layer) { + on_layer_changed(layer); + u->current_layer = layer; + } + + if(!u->is_started) { + const int32_t buf_w = lv_area_get_width(&layer->buf_area); + const int32_t buf_h = lv_area_get_height(&layer->buf_area); + + glViewport(0, 0, buf_w, buf_h); + LV_PROFILER_DRAW_BEGIN_TAG("nvgBeginFrame"); + nvgBeginFrame(u->vg, buf_w, buf_h, 1.0f); + LV_PROFILER_DRAW_END_TAG("nvgBeginFrame"); + u->is_started = true; + } + + t->state = LV_DRAW_TASK_STATE_IN_PROGRESS; + + draw_execute(u, t); + + t->state = LV_DRAW_TASK_STATE_FINISHED; + + /*The draw unit is free now. Request a new dispatching as it can get a new task*/ + lv_draw_dispatch_request(); + + return 1; +} + +static int32_t draw_evaluate(lv_draw_unit_t * draw_unit, lv_draw_task_t * task) +{ + LV_UNUSED(draw_unit); + + switch(task->type) { + case LV_DRAW_TASK_TYPE_FILL: + case LV_DRAW_TASK_TYPE_BORDER: + case LV_DRAW_TASK_TYPE_BOX_SHADOW: + case LV_DRAW_TASK_TYPE_LETTER: + case LV_DRAW_TASK_TYPE_LABEL: + case LV_DRAW_TASK_TYPE_IMAGE: + case LV_DRAW_TASK_TYPE_LAYER: + case LV_DRAW_TASK_TYPE_LINE: + case LV_DRAW_TASK_TYPE_ARC: + case LV_DRAW_TASK_TYPE_TRIANGLE: + case LV_DRAW_TASK_TYPE_MASK_RECTANGLE: +#if LV_USE_VECTOR_GRAPHIC + case LV_DRAW_TASK_TYPE_VECTOR: +#endif + break; + + default: + /*The draw unit is not able to draw this task. 
*/ + return 0; + } + + if(task->preference_score > 80) { + /* The draw unit is able to draw this task. */ + task->preference_score = 80; + task->preferred_draw_unit_id = NANOVG_DRAW_UNIT_ID; + } + + return 1; +} + +static int32_t draw_delete(lv_draw_unit_t * draw_unit) +{ + lv_draw_nanovg_unit_t * unit = (lv_draw_nanovg_unit_t *)draw_unit; + lv_draw_nanovg_label_deinit(unit); + lv_nanovg_fbo_cache_deinit(unit); + lv_nanovg_image_cache_deinit(unit); + lv_nanovg_utils_deinit(unit); + NVG_CTX_DELETE(unit->vg); + unit->vg = NULL; + return 0; +} + +static void draw_event_cb(lv_event_t * e) +{ + lv_draw_nanovg_unit_t * u = lv_event_get_current_target(e); + lv_layer_t * layer = lv_event_get_param(e); + + switch(lv_event_get_code(e)) { + case LV_EVENT_CANCEL: + LV_PROFILER_DRAW_BEGIN_TAG("nvgCancelFrame"); + nvgCancelFrame(u->vg); + LV_PROFILER_DRAW_END_TAG("nvgCancelFrame"); + lv_nanovg_clean_up(u); + break; + case LV_EVENT_CHILD_CREATED: { + /* The internal rendering uses RGBA format, which is switched to LVGL BGRA format during readback. 
*/ + lv_cache_entry_t * entry = lv_nanovg_fbo_cache_get(u, lv_area_get_width(&layer->buf_area), + lv_area_get_height(&layer->buf_area), 0, NVG_TEXTURE_RGBA); + layer->user_data = entry; + } + break; + case LV_EVENT_CHILD_DELETED: { + lv_cache_entry_t * entry = layer->user_data; + if(entry) { + lv_nanovg_fbo_cache_release(u, entry); + layer->user_data = NULL; + } + + /** + * Clear current_layer if it's being deleted, so next dispatch + * will properly call on_layer_changed even if layer address is reused + */ + if(u->current_layer == layer) { + u->current_layer = NULL; + } + } + break; + case LV_EVENT_SCREEN_LOAD_START: + on_layer_readback(u, layer); + break; + case LV_EVENT_INVALIDATE_AREA: + lv_nanovg_image_cache_drop(u, lv_event_get_param(e)); + break; + default: + break; + } +} + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg.h b/src/draw/nanovg/lv_draw_nanovg.h new file mode 100644 index 0000000000..7b1205a398 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg.h @@ -0,0 +1,48 @@ +/** + * @file lv_draw_nanovg.h + * + */ + +#ifndef LV_DRAW_NANOVG_H +#define LV_DRAW_NANOVG_H + +#ifdef __cplusplus +extern "C" { +#endif + +/********************* + * INCLUDES + *********************/ + +#include "../../misc/lv_types.h" + +#if LV_USE_DRAW_NANOVG + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +/********************** + * GLOBAL PROTOTYPES + **********************/ + +/** + * Initialize NanoVG rendering + */ +void lv_draw_nanovg_init(void); + +/********************** + * MACROS + **********************/ + +#endif /*LV_USE_DRAW_NANOVG*/ + +#ifdef __cplusplus +} /*extern "C"*/ +#endif + +#endif /*LV_DRAW_NANOVG_H*/ diff --git a/src/draw/nanovg/lv_draw_nanovg_arc.c b/src/draw/nanovg/lv_draw_nanovg_arc.c new file mode 100644 index 0000000000..91ccfcc892 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_arc.c @@ -0,0 +1,177 @@ +/** + * @file lv_draw_nanovg_arc.c 
+ * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_math.h" +#include "lv_nanovg_utils.h" +#include "lv_nanovg_image_cache.h" + +/********************* +* DEFINES +*********************/ + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_arc(lv_draw_task_t * t, const lv_draw_arc_dsc_t * dsc, const lv_area_t * coords) +{ + LV_PROFILER_DRAW_BEGIN; + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + lv_area_t clip_area; + if(!lv_area_intersect(&clip_area, coords, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + float start_angle = dsc->start_angle; + float end_angle = dsc->end_angle; + float sweep_angle = end_angle - start_angle; + + while(sweep_angle < 0) { + sweep_angle += 360; + } + + while(sweep_angle > 360) { + sweep_angle -= 360; + } + + /*If the angles are the same then there is nothing to draw*/ + if(nvg_math_is_zero(sweep_angle)) { + LV_PROFILER_DRAW_END; + return; + } + + nvgBeginPath(u->vg); + + float radius_out = dsc->radius; + float radius_in = dsc->radius - dsc->width; + float cx = dsc->center.x; + float cy = dsc->center.y; + + enum NVGwinding winding = NVG_CCW; + + if(nvg_math_is_equal(sweep_angle, 360)) { + nvgCircle(u->vg, cx, cy, radius_out); + + /* radius_in <= 0, normal fill circle */ + if(radius_in > 0) { + nvgCircle(u->vg, cx, cy, radius_in); + } + winding = NVG_CW; + } + else { + float start_angle_rad = NVG_MATH_RADIANS(start_angle); + float end_angle_rad = NVG_MATH_RADIANS(end_angle); + + if(radius_in > 0) { + /* radius_out start point */ + float start_x = radius_out * 
NVG_MATH_COSF(start_angle_rad) + cx; + float start_y = radius_out * NVG_MATH_SINF(start_angle_rad) + cy; + + /* radius_in start point */ + float end_x = radius_in * NVG_MATH_COSF(end_angle_rad) + cx; + float end_y = radius_in * NVG_MATH_SINF(end_angle_rad) + cy; + + nvgMoveTo(u->vg, start_x, start_y); + + /* radius_out arc */ + lv_nanovg_path_append_arc(u->vg, + cx, cy, + radius_out, + start_angle, + sweep_angle, + false); + + /* line to radius_in */ + nvgLineTo(u->vg, end_x, end_y); + + /* radius_in arc */ + lv_nanovg_path_append_arc(u->vg, + cx, cy, + radius_in, + end_angle, + -sweep_angle, + false); + + /* close arc */ + nvgClosePath(u->vg); + } + else { + /* draw a normal arc pie shape */ + lv_nanovg_path_append_arc(u->vg, cx, cy, radius_out, start_angle, sweep_angle, true); + } + + /* draw round */ + if(dsc->rounded && dsc->width > 0) { + float round_radius = radius_out > dsc->width ? dsc->width / 2.0f : radius_out / 2.0f; + float round_center = radius_out - round_radius; + float rcx1 = cx + round_center * NVG_MATH_COSF(end_angle_rad); + float rcy1 = cy + round_center * NVG_MATH_SINF(end_angle_rad); + nvgCircle(u->vg, rcx1, rcy1, round_radius); + + float rcx2 = cx + round_center * NVG_MATH_COSF(start_angle_rad); + float rcy2 = cy + round_center * NVG_MATH_SINF(start_angle_rad); + nvgCircle(u->vg, rcx2, rcy2, round_radius); + } + } + + if(dsc->img_src) { + lv_image_header_t header; + int image_handle = lv_nanovg_image_cache_get_handle(u, dsc->img_src, lv_color32_make(0, 0, 0, 0), 0, &header); + if(image_handle < 0) { + LV_PROFILER_DRAW_END; + return; + } + + /* move image to center */ + float img_half_w = header.w / 2.0f; + float img_half_h = header.h / 2.0f; + + NVGpaint paint = nvgImagePattern(u->vg, + cx - img_half_w, cy - img_half_h, + header.w, header.h, 0, + image_handle, + dsc->opa / (float)LV_OPA_COVER); + nvgFillPaint(u->vg, paint); + nvgFill(u->vg); + } + else { + lv_nanovg_fill(u->vg, winding, NVG_SOURCE_OVER, lv_nanovg_color_convert(dsc->color, 
dsc->opa)); + } + + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_border.c b/src/draw/nanovg/lv_draw_nanovg_border.c new file mode 100644 index 0000000000..75c2279d17 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_border.c @@ -0,0 +1,383 @@ +/** + * @file lv_draw_nanovg_border.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" +#include "lv_nanovg_math.h" + +/********************* +* DEFINES +*********************/ + +#define HAS_BORDER_SIDE(dsc_side, side) (((dsc_side) & (side)) == (side)) + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +static enum NVGwinding path_append_inner_rect(NVGcontext * ctx, + const lv_draw_border_dsc_t * dsc, + int32_t x, int32_t y, int32_t w, int32_t h, + float r); + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_border(lv_draw_task_t * t, const lv_draw_border_dsc_t * dsc, const lv_area_t * coords) +{ + LV_PROFILER_DRAW_BEGIN; + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + lv_area_t clip_area; + if(!lv_area_intersect(&clip_area, coords, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + int32_t w = lv_area_get_width(coords); + int32_t h = lv_area_get_height(coords); + float r_out = dsc->radius; + if(dsc->radius) { + float r_short = LV_MIN(w, h) / 2.0f; + r_out = LV_MIN(r_out, r_short); + } + + nvgBeginPath(u->vg); + + /* outer rect */ + lv_nanovg_path_append_rect(u->vg, + coords->x1, coords->y1, + w, h, + r_out); + + /* inner rect */ + enum NVGwinding winding = 
path_append_inner_rect(u->vg, dsc, coords->x1, coords->y1, w, h, r_out); + + lv_nanovg_fill( + u->vg, + winding, + NVG_SOURCE_OVER, + lv_nanovg_color_convert(dsc->color, dsc->opa)); + + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +static enum NVGwinding path_append_inner_rect(NVGcontext * ctx, + const lv_draw_border_dsc_t * dsc, + int32_t x, int32_t y, int32_t w, int32_t h, + float r) +{ + LV_PROFILER_DRAW_BEGIN; + + const float half_w = w / 2.0f; + const float half_h = h / 2.0f; + const int32_t border_w = dsc->width; + const float border_w_max = LV_MIN(half_w, half_h); + + /* normal fill, no inner rect */ + if(border_w >= border_w_max) { + LV_PROFILER_DRAW_END; + return NVG_CCW; + } + + const float r_in = r - border_w; + + /* full border, simple rect */ + if(dsc->side == LV_BORDER_SIDE_FULL) { + lv_nanovg_path_append_rect(ctx, + x + border_w, y + border_w, + w - border_w * 2, h - border_w * 2, + r_in < 0 ? 0 : r_in); + LV_PROFILER_DRAW_END; + return NVG_CW; + } + + /* reset outer rect path */ + nvgBeginPath(ctx); + + /* no-radius case */ + if(dsc->radius <= 0) { + if(dsc->side & LV_BORDER_SIDE_TOP) { + lv_nanovg_path_append_rect(ctx, + x, + y, + w, + border_w, + 0); + } + if(dsc->side & LV_BORDER_SIDE_LEFT) { + lv_nanovg_path_append_rect(ctx, + x, + y, + border_w, + h, + 0); + } + if(dsc->side & LV_BORDER_SIDE_BOTTOM) { + lv_nanovg_path_append_rect(ctx, + x, + y + h - border_w, + w, + border_w, + 0); + } + if(dsc->side & LV_BORDER_SIDE_RIGHT) { + lv_nanovg_path_append_rect(ctx, + x + w - border_w, + y, + border_w, + h, + 0); + } + + LV_PROFILER_DRAW_END; + return NVG_CCW; + } + + /* coordinate reference map: https://github.com/lvgl/lvgl/pull/6796 */ + const float c1_x = x + r; + const float c1_y = y + r; + const float c2_x = x + w - r; + const float c2_y = c1_y; + const float c3_x = c2_x; + const float c3_y = y + h - r; + const float c4_x = c1_x; + const float c4_y = c3_y; + + /* When border_w > r, No need to 
calculate the intersection of the arc and the line */ + if(r_in <= 0) { + const float p1_x = x; + const float p1_y = y + border_w; + const float p2_x = x; + const float p2_y = y + r; + const float p3_x = x + r; + const float p3_y = y; + const float p4_x = x + border_w; + const float p4_y = y; + + const float p5_x = x + w - border_w; + const float p5_y = y; + const float p6_x = x + w - r; + const float p6_y = y; + const float p7_x = x + w; + const float p7_y = y + r; + const float p8_x = x + w; + const float p8_y = y + border_w; + + const float p9_x = x + w; + const float p9_y = y + h - border_w; + const float p10_x = x + w; + const float p10_y = y + h - r; + const float p11_x = x + w - r; + const float p11_y = y + h; + const float p12_x = x + w - border_w; + const float p12_y = y + h; + + const float p13_x = x + border_w; + const float p13_y = y + h; + const float p14_x = x + r; + const float p14_y = y + h; + const float p15_x = x; + const float p15_y = y + h - r; + const float p16_x = x; + const float p16_y = y + h - border_w; + + if(dsc->side & LV_BORDER_SIDE_BOTTOM) { + nvgMoveTo(ctx, p16_x, p16_y); + nvgLineTo(ctx, p9_x, p9_y); + nvgLineTo(ctx, p10_x, p10_y); + lv_nanovg_path_append_arc_right_angle(ctx, p10_x, p10_y, c3_x, c3_y, p11_x, p11_y); + nvgLineTo(ctx, p14_x, p14_y); + lv_nanovg_path_append_arc_right_angle(ctx, p14_x, p14_y, c4_x, c4_y, p15_x, p15_y); + nvgClosePath(ctx); + } + + if(dsc->side & LV_BORDER_SIDE_TOP) { + nvgMoveTo(ctx, p1_x, p1_y); + nvgLineTo(ctx, p2_x, p2_y); + lv_nanovg_path_append_arc_right_angle(ctx, p2_x, p2_y, c1_x, c1_y, p3_x, p3_y); + nvgLineTo(ctx, p6_x, p6_y); + lv_nanovg_path_append_arc_right_angle(ctx, p6_x, p6_y, c2_x, c2_y, p7_x, p7_y); + nvgLineTo(ctx, p8_x, p8_y); + nvgClosePath(ctx); + } + + if(dsc->side & LV_BORDER_SIDE_LEFT) { + nvgMoveTo(ctx, p4_x, p4_y); + nvgLineTo(ctx, p13_x, p13_y); + nvgLineTo(ctx, p14_x, p14_y); + lv_nanovg_path_append_arc_right_angle(ctx, p14_x, p14_y, c4_x, c4_y, p15_x, p15_y); + nvgLineTo(ctx, 
p2_x, p2_y); + lv_nanovg_path_append_arc_right_angle(ctx, p2_x, p2_y, c1_x, c1_y, p3_x, p3_y); + nvgClosePath(ctx); + } + + if(dsc->side & LV_BORDER_SIDE_RIGHT) { + nvgMoveTo(ctx, p5_x, p5_y); + nvgLineTo(ctx, p6_x, p6_y); + lv_nanovg_path_append_arc_right_angle(ctx, p6_x, p6_y, c2_x, c2_y, p7_x, p7_y); + nvgLineTo(ctx, p10_x, p10_y); + lv_nanovg_path_append_arc_right_angle(ctx, p10_x, p10_y, c3_x, c3_y, p11_x, p11_y); + nvgLineTo(ctx, p12_x, p12_y); + nvgClosePath(ctx); + } + + LV_PROFILER_DRAW_END; + return NVG_CCW; + } + + /* When border_w < r, Calculate the intersection of an arc and a line */ + + /* r^2 - r_in^2 = offset^2 */ + const float offset = NVG_MATH_SQRTF((2 * r - border_w) * border_w); + const float sweep_alpha = NVG_MATH_DEGREES(NVG_MATH_ACOSF(r_in / r)); + const float sweep_beta = 90 - sweep_alpha; + + const float p1_x = x + border_w; + const float p1_y = y + r; + const float p2_x = x; + const float p2_y = y + r; + const float p3_x = x + border_w; + const float p3_y = y + r - offset; + const float p4_x = x + r - offset; + const float p4_y = y + border_w; + const float p5_x = x + r; + const float p5_y = y; + const float p6_x = x + r; + const float p6_y = y + border_w; + + const float p7_x = x + w - r; + const float p7_y = y + border_w; + const float p8_x = x + w - r; + const float p8_y = y; + const float p10_x = x + w - border_w; + const float p10_y = y + r - offset; + const float p11_x = x + w; + const float p11_y = y + r; + const float p12_x = x + w - border_w; + const float p12_y = y + r; + + const float p13_x = x + w - border_w; + const float p13_y = y + h - r; + const float p14_x = x + w; + const float p14_y = y + h - r; + const float p16_x = x + w - r + offset; + const float p16_y = y + h - border_w; + const float p17_x = x + w - r; + const float p17_y = y + h; + const float p18_x = x + w - r; + const float p18_y = y + h - border_w; + + const float p19_x = x + r; + const float p19_y = y + h - border_w; + const float p20_x = x + r; + const float 
p20_y = y + h; + const float p21_x = x + r - offset; + const float p21_y = y + h - border_w; + const float p22_x = x + border_w; + const float p22_y = y + h - r + offset; + const float p23_x = x; + const float p23_y = y + h - r; + const float p24_x = x + border_w; + const float p24_y = y + h - r; + + if(dsc->side & LV_BORDER_SIDE_BOTTOM) { + nvgMoveTo(ctx, p21_x, p21_y); + nvgLineTo(ctx, p16_x, p16_y); + lv_nanovg_path_append_arc(ctx, c3_x, c3_y, r, sweep_beta, sweep_alpha, false); + nvgLineTo(ctx, p20_x, p20_y); + lv_nanovg_path_append_arc(ctx, c4_x, c4_y, r, 90, sweep_alpha, false); + nvgClosePath(ctx); + } + + if(dsc->side & LV_BORDER_SIDE_TOP) { + nvgMoveTo(ctx, p4_x, p4_y); + lv_nanovg_path_append_arc(ctx, c1_x, c1_y, r, 270 - sweep_alpha, sweep_alpha, false); + nvgLineTo(ctx, p8_x, p8_y); + lv_nanovg_path_append_arc(ctx, c2_x, c2_y, r, 270, sweep_alpha, false); + nvgClosePath(ctx); + } + + if(dsc->side & LV_BORDER_SIDE_LEFT) { + nvgMoveTo(ctx, p3_x, p3_y); + nvgLineTo(ctx, p22_x, p22_y); + lv_nanovg_path_append_arc(ctx, c4_x, c4_y, r, 90 + sweep_beta, sweep_alpha, false); + nvgLineTo(ctx, p2_x, p2_y); + lv_nanovg_path_append_arc(ctx, c1_x, c1_y, r, 180, sweep_alpha, false); + nvgClosePath(ctx); + } + + if(dsc->side & LV_BORDER_SIDE_RIGHT) { + nvgMoveTo(ctx, p10_x, p10_y); + lv_nanovg_path_append_arc(ctx, c2_x, c2_y, r, 270 + sweep_beta, sweep_alpha, false); + nvgLineTo(ctx, p14_x, p14_y); + lv_nanovg_path_append_arc(ctx, c3_x, c3_y, r, 0, sweep_alpha, false); + nvgClosePath(ctx); + } + + /* Draw the rounded corners adjacent to the border */ + + if(HAS_BORDER_SIDE(dsc->side, LV_BORDER_SIDE_TOP | LV_BORDER_SIDE_LEFT)) { + nvgMoveTo(ctx, p2_x, p2_y); + lv_nanovg_path_append_arc_right_angle(ctx, p2_x, p2_y, c1_x, c1_y, p5_x, p5_y); + nvgLineTo(ctx, p6_x, p6_y); + lv_nanovg_path_append_arc_right_angle(ctx, p6_x, p6_y, c1_x, c1_y, p1_x, p1_y); + nvgClosePath(ctx); + } + + if(HAS_BORDER_SIDE(dsc->side, LV_BORDER_SIDE_TOP | LV_BORDER_SIDE_RIGHT)) { + nvgMoveTo(ctx, 
p8_x, p8_y); + lv_nanovg_path_append_arc_right_angle(ctx, p8_x, p8_y, c2_x, c2_y, p11_x, p11_y); + nvgLineTo(ctx, p12_x, p12_y); + lv_nanovg_path_append_arc_right_angle(ctx, p12_x, p12_y, c2_x, c2_y, p7_x, p7_y); + nvgClosePath(ctx); + } + + if(HAS_BORDER_SIDE(dsc->side, LV_BORDER_SIDE_BOTTOM | LV_BORDER_SIDE_LEFT)) { + nvgMoveTo(ctx, p20_x, p20_y); + lv_nanovg_path_append_arc_right_angle(ctx, p20_x, p20_y, c4_x, c4_y, p23_x, p23_y); + nvgLineTo(ctx, p24_x, p24_y); + lv_nanovg_path_append_arc_right_angle(ctx, p24_x, p24_y, c4_x, c4_y, p19_x, p19_y); + nvgClosePath(ctx); + } + + if(HAS_BORDER_SIDE(dsc->side, LV_BORDER_SIDE_BOTTOM | LV_BORDER_SIDE_RIGHT)) { + nvgMoveTo(ctx, p14_x, p14_y); + lv_nanovg_path_append_arc_right_angle(ctx, p14_x, p14_y, c3_x, c3_y, p17_x, p17_y); + nvgLineTo(ctx, p18_x, p18_y); + lv_nanovg_path_append_arc_right_angle(ctx, p18_x, p18_y, c3_x, c3_y, p13_x, p13_y); + nvgClosePath(ctx); + } + + LV_PROFILER_DRAW_END; + return NVG_CCW; +} + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_box_shadow.c b/src/draw/nanovg/lv_draw_nanovg_box_shadow.c new file mode 100644 index 0000000000..3d10eb573f --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_box_shadow.c @@ -0,0 +1,92 @@ +/** + * @file lv_draw_nanovg_box_shadow.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" + +/********************* +* DEFINES +*********************/ + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_box_shadow(lv_draw_task_t * t, const lv_draw_box_shadow_dsc_t * dsc, const lv_area_t * coords) +{ + 
LV_PROFILER_DRAW_BEGIN; + + /*Calculate the rectangle which is blurred to get the shadow in `shadow_area`*/ + lv_area_t core_area; + core_area.x1 = coords->x1 + dsc->ofs_x - dsc->spread; + core_area.x2 = coords->x2 + dsc->ofs_x + dsc->spread; + core_area.y1 = coords->y1 + dsc->ofs_y - dsc->spread; + core_area.y2 = coords->y2 + dsc->ofs_y + dsc->spread; + + /*Calculate the bounding box of the shadow*/ + lv_area_t shadow_area; + shadow_area.x1 = core_area.x1 - dsc->width / 2 - 1; + shadow_area.x2 = core_area.x2 + dsc->width / 2 + 1; + shadow_area.y1 = core_area.y1 - dsc->width / 2 - 1; + shadow_area.y2 = core_area.y2 + dsc->width / 2 + 1; + + /*Get clipped draw area which is the real draw area. + *It is always the same or inside `shadow_area`*/ + lv_area_t clip_area; + if(!lv_area_intersect(&clip_area, &shadow_area, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + const NVGcolor icol = lv_nanovg_color_convert(dsc->color, dsc->opa); + const NVGcolor ocol = lv_nanovg_color_convert(lv_color_black(), 0); + + const int32_t w = lv_area_get_width(&shadow_area); + const int32_t h = lv_area_get_height(&shadow_area); + + NVGpaint paint = nvgBoxGradient( + u->vg, + shadow_area.x1, shadow_area.y1, + w, h, + dsc->radius, dsc->width, icol, ocol); + + nvgBeginPath(u->vg); + lv_nanovg_path_append_rect(u->vg, shadow_area.x1, shadow_area.y1, w, h, dsc->radius); + nvgFillPaint(u->vg, paint); + nvgFill(u->vg); + + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_fill.c b/src/draw/nanovg/lv_draw_nanovg_fill.c new file mode 100644 index 0000000000..fe4482d425 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_fill.c @@ -0,0 +1,77 @@ +/** + * @file lv_draw_nanovg_fill.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + 
+#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +/********************** + * STATIC PROTOTYPES + **********************/ + +/********************** + * STATIC VARIABLES + **********************/ + +/********************** + * MACROS + **********************/ + +/********************** + * GLOBAL FUNCTIONS + **********************/ + +void lv_draw_nanovg_fill(lv_draw_task_t * t, const lv_draw_fill_dsc_t * dsc, const lv_area_t * coords) +{ + LV_PROFILER_DRAW_BEGIN; + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + lv_area_t clip_area; + if(!lv_area_intersect(&clip_area, coords, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + nvgBeginPath(u->vg); + + lv_nanovg_path_append_rect(u->vg, + coords->x1, coords->y1, + lv_area_get_width(coords), lv_area_get_height(coords), + dsc->radius); + + if(dsc->grad.dir != LV_GRAD_DIR_NONE) { +#if LV_USE_VECTOR_GRAPHIC + lv_nanovg_draw_grad_helper(u->vg, coords, &dsc->grad, NVG_CCW, NVG_SOURCE_OVER); +#else + LV_LOG_WARN("Gradient fill is not supported without VECTOR_GRAPHIC"); +#endif + } + else { + lv_nanovg_fill(u->vg, NVG_CCW, NVG_SOURCE_OVER, lv_nanovg_color_convert(dsc->color, dsc->opa)); + } + + LV_PROFILER_DRAW_END; +} + +/********************** + * STATIC FUNCTIONS + **********************/ + +#endif /*LV_USE_DRAW_NANOVG*/ diff --git a/src/draw/nanovg/lv_draw_nanovg_grad.c b/src/draw/nanovg/lv_draw_nanovg_grad.c new file mode 100644 index 0000000000..38458b996e --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_grad.c @@ -0,0 +1,188 @@ +/** + * @file lv_draw_nanovg_grad.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG && LV_USE_VECTOR_GRAPHIC + +#include "../../draw/lv_draw_vector_private.h" + +#include "lv_nanovg_utils.h" + +/********************* +* DEFINES 
+*********************/ + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +bool lv_nanovg_grad_to_paint(NVGcontext * ctx, const lv_vector_gradient_t * grad, NVGpaint * paint) +{ + LV_PROFILER_DRAW_BEGIN; + + LV_ASSERT_NULL(grad); + LV_ASSERT_NULL(paint); + + if(grad->stops_count < 2) { + LV_LOG_WARN("stops_count(%d) should be 2 for gradient", grad->stops_count); + LV_PROFILER_DRAW_END; + return false; + } + + const NVGcolor icol = lv_nanovg_color_convert(grad->stops[0].color, grad->stops[0].opa); + const NVGcolor ocol = lv_nanovg_color_convert(grad->stops[1].color, grad->stops[1].opa); + + switch(grad->style) { + case LV_VECTOR_GRADIENT_STYLE_LINEAR: + *paint = nvgLinearGradient(ctx, grad->x1, grad->y1, grad->x2, grad->y2, icol, ocol); + break; + + case LV_VECTOR_GRADIENT_STYLE_RADIAL: { + const float inr = grad->cr * grad->stops[0].frac / 255; + const float outr = grad->cr * grad->stops[1].frac / 255; + *paint = nvgRadialGradient(ctx, grad->cx, grad->cy, inr, outr, icol, ocol); + } + break; + + default: + LV_LOG_WARN("Unsupported gradient style: %d", grad->style); + LV_PROFILER_DRAW_END; + return false; + } + + LV_PROFILER_DRAW_END; + return true; +} + +void lv_nanovg_draw_grad( + NVGcontext * ctx, + const lv_vector_gradient_t * grad, + enum NVGwinding winding, + enum NVGcompositeOperation composite_operation) +{ + LV_PROFILER_DRAW_BEGIN; + + NVGpaint paint; + if(!lv_nanovg_grad_to_paint(ctx, grad, &paint)) { + LV_PROFILER_DRAW_END; + return; + } + + nvgPathWinding(ctx, winding); + nvgGlobalCompositeOperation(ctx, composite_operation); + nvgFillPaint(ctx, paint); + nvgFill(ctx); + + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_draw_grad_helper( + NVGcontext * ctx, + const 
lv_area_t * area, + const lv_grad_dsc_t * grad_dsc, + enum NVGwinding winding, + enum NVGcompositeOperation composite_operation) +{ + LV_ASSERT_NULL(ctx); + LV_ASSERT_NULL(area); + LV_ASSERT_NULL(grad_dsc); + + lv_vector_gradient_t grad; + lv_memzero(&grad, sizeof(grad)); + + grad.style = LV_VECTOR_GRADIENT_STYLE_LINEAR; + grad.stops_count = grad_dsc->stops_count; + lv_memcpy(grad.stops, grad_dsc->stops, sizeof(lv_grad_stop_t) * grad_dsc->stops_count); + + /*convert to spread mode*/ + switch(grad_dsc->extend) { + case LV_GRAD_EXTEND_PAD: + grad.spread = LV_VECTOR_GRADIENT_SPREAD_PAD; + break; + case LV_GRAD_EXTEND_REPEAT: + grad.spread = LV_VECTOR_GRADIENT_SPREAD_REPEAT; + break; + case LV_GRAD_EXTEND_REFLECT: + grad.spread = LV_VECTOR_GRADIENT_SPREAD_REFLECT; + break; + default: + LV_LOG_WARN("Unsupported gradient extend mode: %d", grad_dsc->extend); + grad.spread = LV_VECTOR_GRADIENT_SPREAD_PAD; + break; + } + + switch(grad_dsc->dir) { + case LV_GRAD_DIR_VER: + grad.x1 = area->x1; + grad.y1 = area->y1; + grad.x2 = area->x1; + grad.y2 = area->y2 + 1; + break; + + case LV_GRAD_DIR_HOR: + grad.x1 = area->x1; + grad.y1 = area->y1; + grad.x2 = area->x2 + 1; + grad.y2 = area->y1; + break; + + case LV_GRAD_DIR_LINEAR: { + int32_t w = lv_area_get_width(area); + int32_t h = lv_area_get_height(area); + + grad.x1 = lv_pct_to_px(grad_dsc->params.linear.start.x, w) + area->x1; + grad.y1 = lv_pct_to_px(grad_dsc->params.linear.start.y, h) + area->y1; + grad.x2 = lv_pct_to_px(grad_dsc->params.linear.end.x, w) + area->x1; + grad.y2 = lv_pct_to_px(grad_dsc->params.linear.end.y, h) + area->y1; + } + break; + + case LV_GRAD_DIR_RADIAL: { + grad.style = LV_VECTOR_GRADIENT_STYLE_RADIAL; + int32_t w = lv_area_get_width(area); + int32_t h = lv_area_get_height(area); + + grad.cx = lv_pct_to_px(grad_dsc->params.radial.focal.x, w) + area->x1; + grad.cy = lv_pct_to_px(grad_dsc->params.radial.focal.y, h) + area->y1; + int32_t end_extent_x = lv_pct_to_px(grad_dsc->params.radial.end_extent.x, 
w) + area->x1; + int32_t end_extent_y = lv_pct_to_px(grad_dsc->params.radial.end_extent.y, h) + area->y1; + grad.cr = LV_MAX(end_extent_x - grad.cx, end_extent_y - grad.cy); + } + break; + + default: + LV_LOG_WARN("Unsupported gradient direction: %d", grad_dsc->dir); + return; + } + + lv_nanovg_draw_grad(ctx, &grad, winding, composite_operation); +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_image.c b/src/draw/nanovg/lv_draw_nanovg_image.c new file mode 100644 index 0000000000..bf8b935da6 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_image.c @@ -0,0 +1,229 @@ +/** + * @file lv_draw_nanovg_image.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" +#include "lv_nanovg_math.h" +#include "lv_nanovg_image_cache.h" +#include "../lv_image_decoder_private.h" +#include "../lv_draw_image_private.h" + +/********************* +* DEFINES +*********************/ + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +static void image_dsc_to_matrix(lv_matrix_t * matrix, int32_t x, int32_t y, const lv_draw_image_dsc_t * dsc); +static bool is_power_of_2(uint32_t num); +static void fill_repeat_tile_image( + lv_draw_nanovg_unit_t * u, + const lv_draw_image_dsc_t * dsc, + const lv_area_t * coords, + lv_area_t tile_area, + const uint32_t img_w, + const uint32_t img_h, + const int image_handle); + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_image(lv_draw_task_t * t, const lv_draw_image_dsc_t * dsc, const lv_area_t * coords, + int image_handle) +{ + LV_PROFILER_DRAW_BEGIN; + + lv_area_t 
clip_area; + if(!lv_area_intersect(&clip_area, &t->_real_area, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + /* Use coords as the fallback image width and height */ + const uint32_t img_w = dsc->header.w ? dsc->header.w : lv_area_get_width(coords); + const uint32_t img_h = dsc->header.h ? dsc->header.h : lv_area_get_height(coords); + bool use_repeat_tile = false; + + if(image_handle < 0) { + int image_flags = 0; + + if(dsc->tile) { +#ifdef NANOVG_GLES2_IMPLEMENTATION + /* GLES2 does not support sampling non-power-of-2 textures in repeating mode. */ + if(!is_power_of_2(img_w) || !is_power_of_2(img_h)) { + LV_LOG_TRACE("Unsupported image size %" LV_PRIu32 " x %" LV_PRIu32 ". Skipping for repeat mode.", img_w, img_h); + use_repeat_tile = true; + } + else +#endif + { + LV_UNUSED(is_power_of_2); + image_flags |= NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY; + } + } + + image_handle = lv_nanovg_image_cache_get_handle(u, dsc->src, lv_color_to_32(dsc->recolor, dsc->opa), image_flags, NULL); + } + + if(image_handle < 0) { + LV_PROFILER_DRAW_END; + return; + } + + /* original image matrix */ + lv_matrix_t image_matrix; + lv_matrix_identity(&image_matrix); + image_dsc_to_matrix(&image_matrix, coords->x1, coords->y1, dsc); + lv_nanovg_transform(u->vg, &image_matrix); + + int32_t img_ofs_x = 0; + int32_t img_ofs_y = 0; + int32_t rect_w = img_w; + int32_t rect_h = img_h; + + if(dsc->tile) { + lv_area_t tile_area; + if(lv_area_get_width(&dsc->image_area) >= 0) { + tile_area = dsc->image_area; + } + else { + tile_area = *coords; + } + + if(use_repeat_tile) { + /* When alignment requirements are not met, simulate tiles by repeating the texture. 
/**
 * Check whether a value is an exact power of two.
 *
 * A power of two has exactly one bit set; `num & (num - 1)` clears the lowest
 * set bit, so the result is zero only for powers of two. Zero itself is
 * rejected explicitly. This replaces the previous round-up-to-next-power-of-2
 * bit-smear sequence with the canonical single-expression check; the truth
 * table is identical (0 -> false, 1 -> true, 0x80000000 -> true).
 *
 * @param num value to test
 * @return true when `num` is a power of two, false otherwise (including 0)
 */
static bool is_power_of_2(uint32_t num)
{
    return num != 0u && (num & (num - 1u)) == 0u;
}
num - 1 : 0; + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; + n++; + return n == num; +} + +static void fill_repeat_tile_image( + lv_draw_nanovg_unit_t * u, + const lv_draw_image_dsc_t * dsc, + const lv_area_t * coords, + lv_area_t tile_area, + const uint32_t img_w, + const uint32_t img_h, + const int image_handle) +{ + if(dsc->clip_radius) { + LV_LOG_WARN("Unsupported clip radius for repeat mode."); + } + + const int32_t tile_x_start = tile_area.x1; + while(tile_area.y1 <= coords->y2) { + while(tile_area.x1 <= coords->x2) { + const int32_t img_ofs_x = tile_area.x1 - coords->x1; + const int32_t img_ofs_y = tile_area.y1 - coords->y1; + + lv_area_t clipped_img_area; + if(lv_area_intersect(&clipped_img_area, &tile_area, coords)) { + nvgBeginPath(u->vg); + lv_nanovg_path_append_rect(u->vg, img_ofs_x, img_ofs_y, img_w, img_h, 0); + NVGpaint paint = nvgImagePattern(u->vg, img_ofs_x, img_ofs_y, img_w, img_h, 0, image_handle, + dsc->opa / (float)LV_OPA_COVER); + nvgFillPaint(u->vg, paint); + nvgFill(u->vg); + } + + tile_area.x1 += img_w; + tile_area.x2 += img_w; + } + + tile_area.y1 += img_h; + tile_area.y2 += img_h; + tile_area.x1 = tile_x_start; + tile_area.x2 = tile_x_start + img_w - 1; + } +} + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_label.c b/src/draw/nanovg/lv_draw_nanovg_label.c new file mode 100644 index 0000000000..1f1b1a2749 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_label.c @@ -0,0 +1,384 @@ +/** + * @file lv_draw_nanovg_label.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" +#include "lv_nanovg_image_cache.h" +#include "../lv_draw_label_private.h" +#include "../lv_draw_image_private.h" +#include "../../misc/cache/lv_cache_entry_private.h" +#include "../../misc/lv_pending.h" +#include "../../libs/freetype/lv_freetype.h" + +/********************* +* DEFINES 
+*********************/ + +/********************** +* TYPEDEFS +**********************/ + +typedef struct { + /* context */ + lv_draw_nanovg_unit_t * u; + + /* key */ + lv_font_glyph_dsc_t g_dsc; + + /* value */ + int image_handle; +} letter_item_t; + +/********************** +* STATIC PROTOTYPES +**********************/ + +static void draw_letter_cb(lv_draw_task_t * t, lv_draw_glyph_dsc_t * glyph_draw_dsc, + lv_draw_fill_dsc_t * fill_draw_dsc, const lv_area_t * fill_area); + +static void letter_cache_release_cb(void * entry, void * user_data); +static bool letter_create_cb(letter_item_t * item, void * user_data); +static void letter_free_cb(letter_item_t * item, void * user_data); +static lv_cache_compare_res_t letter_compare_cb(const letter_item_t * lhs, const letter_item_t * rhs); + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_label_init(lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_ASSERT(u->letter_cache == NULL); + LV_ASSERT(u->letter_pending == NULL); + + const lv_cache_ops_t ops = { + .compare_cb = (lv_cache_compare_cb_t)letter_compare_cb, + .create_cb = (lv_cache_create_cb_t)letter_create_cb, + .free_cb = (lv_cache_free_cb_t)letter_free_cb, + }; + + u->letter_cache = lv_cache_create(&lv_cache_class_lru_rb_count, sizeof(letter_item_t), LV_NANOVG_LETTER_CACHE_CNT, ops); + lv_cache_set_name(u->letter_cache, "NVG_LETTER"); + u->letter_pending = lv_pending_create(sizeof(lv_cache_entry_t *), 4); + lv_pending_set_free_cb(u->letter_pending, letter_cache_release_cb, u->letter_cache); +} + +void lv_draw_nanovg_label_deinit(lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_ASSERT(u->letter_cache); + LV_ASSERT(u->letter_pending); + + lv_pending_destroy(u->letter_pending); + u->letter_pending = NULL; + + lv_cache_destroy(u->letter_cache, NULL); + u->letter_cache = NULL; +} + 
+void lv_draw_nanovg_letter(lv_draw_task_t * t, const lv_draw_letter_dsc_t * dsc, const lv_area_t * coords) +{ + LV_ASSERT_NULL(t); + LV_ASSERT_NULL(dsc); + LV_ASSERT_NULL(coords); + + if(dsc->opa <= LV_OPA_MIN) + return; + + LV_PROFILER_DRAW_BEGIN; + + lv_draw_glyph_dsc_t glyph_dsc; + lv_draw_glyph_dsc_init(&glyph_dsc); + glyph_dsc.opa = dsc->opa; + glyph_dsc.bg_coords = NULL; + glyph_dsc.color = dsc->color; + glyph_dsc.rotation = dsc->rotation; + glyph_dsc.pivot = dsc->pivot; + + lv_draw_unit_draw_letter(t, &glyph_dsc, &(lv_point_t) { + .x = coords->x1, .y = coords->y1 + }, + dsc->font, dsc->unicode, draw_letter_cb); + + if(glyph_dsc._draw_buf) { + lv_draw_buf_destroy(glyph_dsc._draw_buf); + glyph_dsc._draw_buf = NULL; + } + + LV_PROFILER_DRAW_END; +} + +void lv_draw_nanovg_label(lv_draw_task_t * t, const lv_draw_label_dsc_t * dsc, const lv_area_t * coords) +{ + LV_PROFILER_DRAW_BEGIN; + lv_draw_label_iterate_characters(t, dsc, coords, draw_letter_cb); + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +static inline void convert_letter_matrix(lv_matrix_t * matrix, const lv_draw_glyph_dsc_t * dsc) +{ + lv_matrix_translate(matrix, dsc->letter_coords->x1, dsc->letter_coords->y1); + + if(!dsc->rotation) { + return; + } + + const lv_point_t pivot = { + .x = dsc->pivot.x, + .y = dsc->g->box_h + dsc->g->ofs_y + }; + lv_matrix_translate(matrix, pivot.x, pivot.y); + lv_matrix_rotate(matrix, dsc->rotation / 10.0f); + lv_matrix_translate(matrix, -pivot.x, -pivot.y); +} + +static bool draw_letter_clip_areas(lv_draw_task_t * t, const lv_draw_glyph_dsc_t * dsc, lv_area_t * letter_area, + lv_area_t * cliped_area) +{ + *letter_area = *dsc->letter_coords; + + if(dsc->rotation) { + const lv_point_t pivot = { + .x = dsc->pivot.x, + .y = dsc->g->box_h + dsc->g->ofs_y + }; + + lv_image_buf_get_transformed_area( + letter_area, + lv_area_get_width(dsc->letter_coords), + lv_area_get_height(dsc->letter_coords), + dsc->rotation, + 
LV_SCALE_NONE, + LV_SCALE_NONE, + &pivot); + lv_area_move(letter_area, dsc->letter_coords->x1, dsc->letter_coords->y1); + } + + if(!lv_area_intersect(cliped_area, &t->clip_area, letter_area)) { + return false; + } + + return true; +} + +static void draw_letter_bitmap(lv_draw_task_t * t, const lv_draw_glyph_dsc_t * dsc, int image_handle) +{ + LV_PROFILER_DRAW_BEGIN; + + lv_area_t image_area; + lv_area_t clip_area; + if(!draw_letter_clip_areas(t, dsc, &image_area, &clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + if(!dsc->rotation) { + float x = dsc->letter_coords->x1; + float y = dsc->letter_coords->y1; + float w = lv_area_get_width(dsc->letter_coords); + float h = lv_area_get_height(dsc->letter_coords); + + NVGpaint paint = nvgImagePattern(u->vg, x, y, w, h, 0, image_handle, 1.0f); + paint.innerColor = paint.outerColor = nvgRGBA(dsc->color.red, dsc->color.green, dsc->color.blue, dsc->opa); + + nvgBeginPath(u->vg); + nvgRect(u->vg, x, y, w, h); + nvgFillPaint(u->vg, paint); + nvgFill(u->vg); + } + else { + /* TODO: draw rotated bitmap */ + } + + LV_PROFILER_DRAW_END; +} + +static inline int letter_get_image_handle(lv_draw_nanovg_unit_t * u, lv_font_glyph_dsc_t * g_dsc) +{ + LV_PROFILER_DRAW_BEGIN; + + letter_item_t search_key = { 0 }; + search_key.u = u; + search_key.g_dsc = *g_dsc; + search_key.g_dsc.entry = NULL; /* Exclude the cache entry from the key */ + + lv_cache_entry_t * cache_node_entry = lv_cache_acquire(u->letter_cache, &search_key, NULL); + if(cache_node_entry == NULL) { + /* check if the cache is full */ + size_t free_size = lv_cache_get_free_size(u->letter_cache, NULL); + if(free_size == 0) { + LV_LOG_INFO("letter cache is full, release all pending cache entries"); + lv_nanovg_end_frame(u); + } + + cache_node_entry = lv_cache_acquire_or_create(u->letter_cache, &search_key, NULL); + if(cache_node_entry == NULL) { + LV_LOG_ERROR("letter cache creating failed"); + 
LV_PROFILER_DRAW_END; + return -1; + } + } + + /* Add the new entry to the pending list */ + lv_pending_add(u->letter_pending, &cache_node_entry); + + letter_item_t * letter_item = lv_cache_entry_get_data(cache_node_entry); + + LV_PROFILER_DRAW_END; + return letter_item->image_handle; +} + +static void letter_cache_release_cb(void * entry, void * user_data) +{ + lv_cache_entry_t ** entry_p = entry; + lv_cache_t * cache = user_data; + lv_cache_release(cache, * entry_p, NULL); +} + +static bool letter_create_cb(letter_item_t * item, void * user_data) +{ + LV_PROFILER_DRAW_BEGIN; + LV_UNUSED(user_data); + lv_font_glyph_dsc_t * g_dsc = &item->g_dsc; + + const uint32_t w = g_dsc->box_w; + const uint32_t h = g_dsc->box_h; + + lv_draw_buf_t * image_buf = lv_nanovg_reshape_global_image(item->u, LV_COLOR_FORMAT_A8, w, h); + if(!image_buf) { + LV_PROFILER_DRAW_END; + return false; + } + + if(!lv_font_get_glyph_bitmap(g_dsc, image_buf)) { + LV_PROFILER_DRAW_END; + return false; + } + + LV_PROFILER_DRAW_BEGIN_TAG("nvgCreateImage"); + item->image_handle = nvgCreateImage(item->u->vg, w, h, 0, NVG_TEXTURE_ALPHA, lv_draw_buf_goto_xy(image_buf, 0, 0)); + LV_PROFILER_DRAW_END_TAG("nvgCreateImage"); + + LV_LOG_TRACE("image_handle: %d", item->image_handle); + LV_PROFILER_DRAW_END; + return true; +} + +static void letter_free_cb(letter_item_t * item, void * user_data) +{ + LV_UNUSED(user_data); + LV_PROFILER_DRAW_BEGIN; + LV_LOG_TRACE("image_handle: %d", item->image_handle); + nvgDeleteImage(item->u->vg, item->image_handle); + item->image_handle = -1; + LV_PROFILER_DRAW_END; +} + +static lv_cache_compare_res_t letter_compare_cb(const letter_item_t * lhs, const letter_item_t * rhs) +{ + int cmp_res = lv_memcmp(&lhs->g_dsc, &rhs->g_dsc, sizeof(lv_font_glyph_dsc_t)); + if(cmp_res != 0) { + return cmp_res > 0 ? 
1 : -1; + } + + return 0; +} + + +static void draw_letter_cb(lv_draw_task_t * t, lv_draw_glyph_dsc_t * glyph_draw_dsc, + lv_draw_fill_dsc_t * fill_draw_dsc, const lv_area_t * fill_area) +{ + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + if(glyph_draw_dsc) { + switch(glyph_draw_dsc->format) { + case LV_FONT_GLYPH_FORMAT_A1: + case LV_FONT_GLYPH_FORMAT_A2: + case LV_FONT_GLYPH_FORMAT_A3: + case LV_FONT_GLYPH_FORMAT_A4: + case LV_FONT_GLYPH_FORMAT_A8: { + int image_handle = letter_get_image_handle(u, glyph_draw_dsc->g); + if(image_handle < 0) { + return; + } + + draw_letter_bitmap(t, glyph_draw_dsc, image_handle); + } + break; + +#if LV_USE_FREETYPE + case LV_FONT_GLYPH_FORMAT_VECTOR: { + if(lv_freetype_is_outline_font(glyph_draw_dsc->g->resolved_font)) { + if(!glyph_draw_dsc->glyph_data) { + return; + } + + /* TODO: draw_letter_outline(t, glyph_draw_dsc); */ + } + } + break; +#endif /* LV_USE_FREETYPE */ + + case LV_FONT_GLYPH_FORMAT_IMAGE: { + glyph_draw_dsc->glyph_data = lv_font_get_glyph_bitmap(glyph_draw_dsc->g, glyph_draw_dsc->_draw_buf); + if(!glyph_draw_dsc->glyph_data) { + return; + } + + lv_draw_image_dsc_t image_dsc; + lv_draw_image_dsc_init(&image_dsc); + image_dsc.opa = glyph_draw_dsc->opa; + image_dsc.src = glyph_draw_dsc->glyph_data; + image_dsc.rotation = glyph_draw_dsc->rotation; + lv_draw_nanovg_image(t, &image_dsc, glyph_draw_dsc->letter_coords, -1); + } + break; + +#if LV_USE_FONT_PLACEHOLDER + case LV_FONT_GLYPH_FORMAT_NONE: { + if(glyph_draw_dsc->bg_coords == NULL) break; + /* Draw a placeholder rectangle*/ + lv_draw_border_dsc_t border_draw_dsc; + lv_draw_border_dsc_init(&border_draw_dsc); + border_draw_dsc.opa = glyph_draw_dsc->opa; + border_draw_dsc.color = glyph_draw_dsc->color; + border_draw_dsc.width = 1; + lv_draw_nanovg_border(t, &border_draw_dsc, glyph_draw_dsc->bg_coords); + } + break; +#endif /* LV_USE_FONT_PLACEHOLDER */ + + default: + break; + } + } + + if(fill_draw_dsc && fill_area) { + 
lv_draw_nanovg_fill(t, fill_draw_dsc, fill_area); + } +} + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_layer.c b/src/draw/nanovg/lv_draw_nanovg_layer.c new file mode 100644 index 0000000000..8addcc1cc2 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_layer.c @@ -0,0 +1,74 @@ +/** + * @file lv_draw_nanovg_layer.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" +#include "lv_nanovg_fbo_cache.h" +#include "../lv_draw_image_private.h" + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +/********************** + * STATIC PROTOTYPES + **********************/ + +/********************** + * STATIC VARIABLES + **********************/ + +/********************** + * MACROS + **********************/ + +/********************** + * GLOBAL FUNCTIONS + **********************/ + +void lv_draw_nanovg_layer(lv_draw_task_t * t, const lv_draw_image_dsc_t * draw_dsc, + const lv_area_t * coords) +{ + LV_PROFILER_DRAW_BEGIN; + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + lv_layer_t * layer = (lv_layer_t *)draw_dsc->src; + + if(!layer->user_data) { + LV_PROFILER_DRAW_END; + return; + } + + int image_handle = lv_nanovg_fb_get_image_handle(lv_nanovg_fbo_cache_entry_to_fb(layer->user_data)); + if(image_handle <= 0) { + LV_LOG_WARN("Invalid image handle: %d", image_handle); + LV_PROFILER_DRAW_END; + return; + } + + lv_draw_image_dsc_t new_draw_dsc = *draw_dsc; + new_draw_dsc.src = NULL; + lv_draw_nanovg_image(t, &new_draw_dsc, coords, image_handle); + + lv_nanovg_end_frame(u); + + LV_PROFILER_DRAW_END; +} + +/********************** + * STATIC FUNCTIONS + **********************/ + +#endif /*LV_USE_DRAW_NANOVG*/ diff --git a/src/draw/nanovg/lv_draw_nanovg_line.c b/src/draw/nanovg/lv_draw_nanovg_line.c new file mode 100644 index 
0000000000..84086ae46e --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_line.c @@ -0,0 +1,191 @@ +/** + * @file lv_draw_nanovg_line.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" +#include "lv_nanovg_math.h" + +/********************* +* DEFINES +*********************/ + +#define SQ(x) ((x) * (x)) + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_line(lv_draw_task_t * t, const lv_draw_line_dsc_t * dsc) +{ + LV_PROFILER_DRAW_BEGIN; + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + float p1_x = dsc->p1.x; + float p1_y = dsc->p1.y; + float p2_x = dsc->p2.x; + float p2_y = dsc->p2.y; + + if(p1_x == p2_x && p1_y == p2_y) { + LV_PROFILER_DRAW_END; + return; + } + + float half_w = dsc->width * 0.5f; + + lv_area_t rel_clip_area; + rel_clip_area.x1 = (int32_t)(LV_MIN(p1_x, p2_x) - half_w); + rel_clip_area.x2 = (int32_t)(LV_MAX(p1_x, p2_x) + half_w); + rel_clip_area.y1 = (int32_t)(LV_MIN(p1_y, p2_y) - half_w); + rel_clip_area.y2 = (int32_t)(LV_MAX(p1_y, p2_y) + half_w); + + if(!lv_area_intersect(&rel_clip_area, &rel_clip_area, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + int32_t dash_width = dsc->dash_width; + int32_t dash_gap = dsc->dash_gap; + int32_t dash_l = dash_width + dash_gap; + + float dx = p2_x - p1_x; + float dy = p2_y - p1_y; + float inv_dl = nvg_math_inv_sqrtf(SQ(dx) + SQ(dy)); + float w_dx = dsc->width * dy * inv_dl; + float w_dy = dsc->width * dx * inv_dl; + float w2_dx = w_dx / 2; + float w2_dy = w_dy / 2; + + int32_t ndash = 0; + if(dash_width && dash_l * inv_dl < 1.0f) { + ndash = 
(int32_t)((1.0f / inv_dl + dash_l - 1) / dash_l); + } + + nvgBeginPath(u->vg); + + /* head point */ + float head_start_x = p1_x + w2_dx; + float head_start_y = p1_y - w2_dy; + float head_end_x = p1_x - w2_dx; + float head_end_y = p1_y + w2_dy; + + /* tail point */ + float tail_start_x = p2_x - w2_dx; + float tail_start_y = p2_y + w2_dy; + float tail_end_x = p2_x + w2_dx; + float tail_end_y = p2_y - w2_dy; + + /* + head_start tail_end + *-----------------* + /| |\ + / | | \ + arc_c *( *p1 p2* )* arc_c + \ | | / + \| |/ + *-----------------* + head_end tail_start + */ + + /* move to start point */ + nvgMoveTo(u->vg, head_start_x, head_start_y); + + /* draw line head */ + if(dsc->round_start) { + float arc_cx = p1_x - w2_dy; + float arc_cy = p1_y - w2_dx; + + /* start 90deg arc */ + lv_nanovg_path_append_arc_right_angle(u->vg, + head_start_x, head_start_y, + p1_x, p1_y, + arc_cx, arc_cy); + + /* end 90deg arc */ + lv_nanovg_path_append_arc_right_angle(u->vg, + arc_cx, arc_cy, + p1_x, p1_y, + head_end_x, head_end_y); + } + else { + nvgLineTo(u->vg, head_end_x, head_end_y); + } + + /* draw line body */ + nvgLineTo(u->vg, tail_start_x, tail_start_y); + + /* draw line tail */ + if(dsc->round_end) { + float arc_cx = p2_x + w2_dy; + float arc_cy = p2_y + w2_dx; + lv_nanovg_path_append_arc_right_angle(u->vg, + tail_start_x, tail_start_y, + p2_x, p2_y, + arc_cx, arc_cy); + lv_nanovg_path_append_arc_right_angle(u->vg, + arc_cx, arc_cy, + p2_x, p2_y, + tail_end_x, tail_end_y); + } + else { + nvgLineTo(u->vg, tail_end_x, tail_end_y); + } + + /* close draw line body */ + nvgLineTo(u->vg, head_start_x, head_start_y); + + for(int32_t i = 0; i < ndash; i++) { + float start_x = p1_x - w2_dx + dx * (i * dash_l + dash_width) * inv_dl; + float start_y = p1_y + w2_dy + dy * (i * dash_l + dash_width) * inv_dl; + + nvgMoveTo(u->vg, start_x, start_y); + nvgLineTo(u->vg, + p1_x + w2_dx + dx * (i * dash_l + dash_width) * inv_dl, + p1_y - w2_dy + dy * (i * dash_l + dash_width) * inv_dl); + 
nvgLineTo(u->vg, + p1_x + w2_dx + dx * (i + 1) * dash_l * inv_dl, + p1_y - w2_dy + dy * (i + 1) * dash_l * inv_dl); + nvgLineTo(u->vg, + p1_x - w2_dx + dx * (i + 1) * dash_l * inv_dl, + p1_y + w2_dy + dy * (i + 1) * dash_l * inv_dl); + nvgLineTo(u->vg, start_x, start_y); + } + + lv_nanovg_fill( + u->vg, + NVG_CCW, + NVG_SOURCE_OVER, + lv_nanovg_color_convert(dsc->color, dsc->opa)); + + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_mask_rect.c b/src/draw/nanovg/lv_draw_nanovg_mask_rect.c new file mode 100644 index 0000000000..c8f9216634 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_mask_rect.c @@ -0,0 +1,81 @@ +/** + * @file lv_draw_nanovg_mask_rect.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "../../draw/lv_draw_mask_private.h" +#include "lv_nanovg_utils.h" + +/********************* +* DEFINES +*********************/ + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_mask_rect(lv_draw_task_t * t, const lv_draw_mask_rect_dsc_t * dsc) +{ + LV_PROFILER_DRAW_BEGIN; + lv_area_t draw_area; + + if(!lv_area_intersect(&draw_area, &dsc->area, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + nvgBeginPath(u->vg); + + /* Nesting cropping regions using rounded rectangles and normal rectangles */ + lv_nanovg_path_append_rect( + u->vg, + dsc->area.x1, dsc->area.y1, + lv_area_get_width(&dsc->area), lv_area_get_height(&dsc->area), + dsc->radius); + 
lv_nanovg_path_append_rect( + u->vg, + t->clip_area.x1, t->clip_area.y1, + lv_area_get_width(&t->clip_area), lv_area_get_height(&t->clip_area), + 0); + + /* Use NVG_DESTINATION_IN (Sa * D) blending mode to make the corners transparent */ + lv_nanovg_fill( + u->vg, + NVG_CCW, + NVG_DESTINATION_IN, + nvgRGBA(0, 0, 0, 0)); + + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_draw_nanovg_private.h b/src/draw/nanovg/lv_draw_nanovg_private.h new file mode 100644 index 0000000000..a2b3610177 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_private.h @@ -0,0 +1,249 @@ +/** + * @file lv_draw_nanovg_private.h + * + */ + +#ifndef LV_DRAW_NANOVG_PRIVATE_H +#define LV_DRAW_NANOVG_PRIVATE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/********************* + * INCLUDES + *********************/ + +#include "../../lv_conf_internal.h" + +#if LV_USE_DRAW_NANOVG +#include "../lv_draw.h" +#include "../lv_draw_private.h" +#include "../../draw/lv_draw_vector.h" +#include "../../draw/lv_draw_arc.h" +#include "../../draw/lv_draw_rect.h" +#include "../../draw/lv_draw_image.h" +#include "../../draw/lv_draw_label.h" +#include "../../draw/lv_draw_line.h" +#include "../../draw/lv_draw_triangle.h" +#include "../../misc/lv_area_private.h" + +#if !LV_USE_NANOVG +#error "Require LV_USE_NANOVG = 1" +#endif + +#if !LV_USE_MATRIX +#error "Require LV_USE_MATRIX = 1" +#endif + +#include "../../libs/nanovg/nanovg.h" + +/********************* + * DEFINES + *********************/ + +/* Select NanoVG OpenGL backend based on LV_NANOVG_BACKEND */ +#if LV_NANOVG_BACKEND == LV_NANOVG_BACKEND_GL2 +#define NANOVG_GL2_IMPLEMENTATION +#elif LV_NANOVG_BACKEND == LV_NANOVG_BACKEND_GL3 +#define NANOVG_GL3_IMPLEMENTATION +#elif LV_NANOVG_BACKEND == LV_NANOVG_BACKEND_GLES2 +#define NANOVG_GLES2_IMPLEMENTATION +#elif LV_NANOVG_BACKEND == LV_NANOVG_BACKEND_GLES3 +#define NANOVG_GLES3_IMPLEMENTATION 
+#else +#error "Invalid LV_NANOVG_BACKEND value" +#endif + +/********************** + * TYPEDEFS + **********************/ + +struct _lv_pending_t; +struct NVGLUframebuffer; + +typedef struct _lv_draw_nanovg_unit_t { + lv_draw_unit_t base_unit; + lv_layer_t * current_layer; + NVGcontext * vg; + bool is_started; + lv_draw_buf_t * image_buf; + + lv_cache_t * image_cache; + struct _lv_pending_t * image_pending; + lv_ll_t image_drop_ll; + const void * image_drop_src; + + lv_cache_t * letter_cache; + struct _lv_pending_t * letter_pending; + + lv_cache_t * fbo_cache; +} lv_draw_nanovg_unit_t; + +/********************** + * GLOBAL PROTOTYPES + **********************/ + +/** + * Draw arc on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to an arc descriptor + * @param coords the coordinates of the arc + */ +void lv_draw_nanovg_arc(lv_draw_task_t * t, const lv_draw_arc_dsc_t * dsc, const lv_area_t * coords); + +/** + * Draw border on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a border descriptor + * @param coords the coordinates of the border + */ +void lv_draw_nanovg_border(lv_draw_task_t * t, const lv_draw_border_dsc_t * dsc, const lv_area_t * coords); + +/** + * Draw box on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a box descriptor + * @param coords the coordinates of the box + */ +void lv_draw_nanovg_box_shadow(lv_draw_task_t * t, const lv_draw_box_shadow_dsc_t * dsc, const lv_area_t * coords); + +/** + * Fill a rectangle on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a fill descriptor + * @param coords the coordinates of the rectangle + */ +void lv_draw_nanovg_fill(lv_draw_task_t * t, const lv_draw_fill_dsc_t * dsc, const lv_area_t * coords); + +/** + * Draw image on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to an image descriptor + * @param coords the coordinates of the image + * 
@param image_handle the handle of the image to draw + */ +void lv_draw_nanovg_image(lv_draw_task_t * t, const lv_draw_image_dsc_t * dsc, const lv_area_t * coords, + int image_handle); + +/** + * Initialize draw label on a NanoVG context + * @param u pointer to a NanoVG unit + */ +void lv_draw_nanovg_label_init(lv_draw_nanovg_unit_t * u); + +/** + * Deinitialize draw label on a NanoVG context + * @param u pointer to a NanoVG unit + */ +void lv_draw_nanovg_label_deinit(lv_draw_nanovg_unit_t * u); + +/** + * Draw letter on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a letter descriptor + * @param coords the coordinates of the letter + */ +void lv_draw_nanovg_letter(lv_draw_task_t * t, const lv_draw_letter_dsc_t * dsc, const lv_area_t * coords); + +/** + * Draw label on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a label descriptor + * @param coords the coordinates of the label + */ +void lv_draw_nanovg_label(lv_draw_task_t * t, const lv_draw_label_dsc_t * dsc, const lv_area_t * coords); + +/** + * Draw layer on a NanoVG context + * @param t pointer to a drawing task + * @param draw_dsc pointer to an image descriptor + * @param coords the coordinates of the layer + */ +void lv_draw_nanovg_layer(lv_draw_task_t * t, const lv_draw_image_dsc_t * draw_dsc, const lv_area_t * coords); + +/** + * Draw line on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a line descriptor + */ +void lv_draw_nanovg_line(lv_draw_task_t * t, const lv_draw_line_dsc_t * dsc); + +/** + * Draw triangle on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a triangle descriptor + */ +void lv_draw_nanovg_triangle(lv_draw_task_t * t, const lv_draw_triangle_dsc_t * dsc); + +/** + * Draw mask rectangles on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a mask descriptor + */ +void lv_draw_nanovg_mask_rect(lv_draw_task_t * t, const 
lv_draw_mask_rect_dsc_t * dsc); + +/** + * Get image handle from framebuffer + * @param fb the framebuffer to get the image handle from + * @return the image handle + */ +int lv_nanovg_fb_get_image_handle(struct NVGLUframebuffer * fb); + +#if LV_USE_VECTOR_GRAPHIC +/** + * Draw vector graphics on a NanoVG context + * @param t pointer to a drawing task + * @param dsc pointer to a vector descriptor + */ +void lv_draw_nanovg_vector(lv_draw_task_t * t, const lv_draw_vector_dsc_t * dsc); + +/** + * @brief Convert a gradient to a paint + * @param ctx the nanovg context + * @param grad the gradient descriptor + * @param paint the paint to store the result + */ +bool lv_nanovg_grad_to_paint(NVGcontext * ctx, const lv_vector_gradient_t * grad, NVGpaint * paint); + +/** + * @brief Draw a gradient + * @param ctx the nanovg context + * @param grad the gradient descriptor + * @param winding the fill rule + * @param composite_operation the blend mode + */ +void lv_nanovg_draw_grad( + NVGcontext * ctx, + const lv_vector_gradient_t * grad, + enum NVGwinding winding, + enum NVGcompositeOperation composite_operation); + +/** + * @brief Draw a gradient with helper + * @param ctx the nanovg context + * @param area the area to draw the gradient on + * @param grad_dsc the gradient descriptor + * @param winding the fill rule + * @param composite_operation the blend mode + */ +void lv_nanovg_draw_grad_helper( + NVGcontext * ctx, + const lv_area_t * area, + const lv_grad_dsc_t * grad_dsc, + enum NVGwinding winding, + enum NVGcompositeOperation composite_operation); + +#endif /*LV_USE_VECTOR_GRAPHIC*/ + +/********************** + * MACROS + **********************/ + +#endif /*LV_USE_DRAW_NANOVG*/ + +#ifdef __cplusplus +} /*extern "C"*/ +#endif + +#endif /*LV_DRAW_NANOVG_PRIVATE_H*/ diff --git a/src/draw/nanovg/lv_draw_nanovg_triangle.c b/src/draw/nanovg/lv_draw_nanovg_triangle.c new file mode 100644 index 0000000000..0a31d59034 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_triangle.c
@@ -0,0 +1,85 @@ +/** + * @file lv_draw_nanovg_triangle.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_nanovg_utils.h" + +/********************* +* DEFINES +*********************/ + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_triangle(lv_draw_task_t * t, const lv_draw_triangle_dsc_t * dsc) +{ + LV_PROFILER_DRAW_BEGIN; + lv_area_t tri_area; + tri_area.x1 = (int32_t)LV_MIN3(dsc->p[0].x, dsc->p[1].x, dsc->p[2].x); + tri_area.y1 = (int32_t)LV_MIN3(dsc->p[0].y, dsc->p[1].y, dsc->p[2].y); + tri_area.x2 = (int32_t)LV_MAX3(dsc->p[0].x, dsc->p[1].x, dsc->p[2].x); + tri_area.y2 = (int32_t)LV_MAX3(dsc->p[0].y, dsc->p[1].y, dsc->p[2].y); + + lv_area_t clip_area; + if(!lv_area_intersect(&clip_area, &tri_area, &t->clip_area)) { + LV_PROFILER_DRAW_END; + return; + } + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + nvgBeginPath(u->vg); + nvgMoveTo(u->vg, dsc->p[0].x, dsc->p[0].y); + nvgLineTo(u->vg, dsc->p[1].x, dsc->p[1].y); + nvgLineTo(u->vg, dsc->p[2].x, dsc->p[2].y); + nvgClosePath(u->vg); + + if(dsc->grad.dir != LV_GRAD_DIR_NONE) { +#if LV_USE_VECTOR_GRAPHIC + lv_nanovg_draw_grad_helper(u->vg, &tri_area, &dsc->grad, NVG_CCW, NVG_SOURCE_OVER); +#else + LV_LOG_WARN("Gradient fill is not supported without VECTOR_GRAPHIC"); +#endif + } + else { /* normal fill */ + lv_nanovg_fill( + u->vg, + NVG_CCW, + NVG_SOURCE_OVER, + lv_nanovg_color_convert(dsc->color, dsc->opa)); + } + + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +#endif /* LV_USE_DRAW_NANOVG */ diff --git 
a/src/draw/nanovg/lv_draw_nanovg_vector.c b/src/draw/nanovg/lv_draw_nanovg_vector.c new file mode 100644 index 0000000000..e8bf634da0 --- /dev/null +++ b/src/draw/nanovg/lv_draw_nanovg_vector.c @@ -0,0 +1,312 @@ +/** + * @file lv_draw_nanovg_vector.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_draw_nanovg_private.h" + +#if LV_USE_DRAW_NANOVG && LV_USE_VECTOR_GRAPHIC + +#include "lv_nanovg_utils.h" +#include "lv_nanovg_image_cache.h" +#include "../lv_draw_vector_private.h" +#include "../lv_image_decoder_private.h" +#include <float.h> +#include <math.h> + +/********************* +* DEFINES +*********************/ + +#define OPA_MIX(opa1, opa2) LV_UDIV255((opa1) * (opa2)) + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +static void task_draw_cb(void * ctx, const lv_vector_path_t * path, const lv_vector_path_ctx_t * dsc); + static void lv_path_to_nvg(NVGcontext * ctx, const lv_vector_path_t * src, lv_fpoint_t * offset); +static enum NVGcompositeOperation lv_blend_to_nvg(lv_vector_blend_t blend); +static enum NVGwinding lv_fill_to_nvg(lv_vector_fill_t fill_rule); + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_draw_nanovg_vector(lv_draw_task_t * t, const lv_draw_vector_dsc_t * dsc) +{ + LV_PROFILER_DRAW_BEGIN; + if(dsc->task_list == NULL) { + LV_PROFILER_DRAW_END; + return; + } + + lv_layer_t * layer = dsc->base.layer; + if(layer->draw_buf == NULL) { + LV_PROFILER_DRAW_END; + return; + } + + lv_draw_nanovg_unit_t * u = (lv_draw_nanovg_unit_t *)t->draw_unit; + + nvgGlobalAlpha(u->vg, t->opa / (float)LV_OPA_COVER); + + lv_vector_for_each_destroy_tasks(dsc->task_list, task_draw_cb, u); + LV_PROFILER_DRAW_END; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +static
NVGcolor lv_color32_to_nvg(lv_color32_t color, lv_opa_t opa) +{ + uint8_t a = LV_UDIV255(color.alpha * opa); + return nvgRGBA(color.red, color.green, color.blue, a); +} + +static void draw_fill(lv_draw_nanovg_unit_t * u, const lv_vector_fill_dsc_t * fill_dsc, const lv_fpoint_t * offset, + enum NVGcompositeOperation comp_op) +{ + LV_PROFILER_DRAW_BEGIN; + + const enum NVGwinding winding = lv_fill_to_nvg(fill_dsc->fill_rule); + + lv_nanovg_transform(u->vg, &fill_dsc->matrix); + + switch(fill_dsc->style) { + case LV_VECTOR_DRAW_STYLE_SOLID: { + lv_nanovg_fill(u->vg, winding, comp_op, lv_color32_to_nvg(fill_dsc->color, fill_dsc->opa)); + } + break; + case LV_VECTOR_DRAW_STYLE_PATTERN: { + const lv_draw_image_dsc_t * img_dsc = &fill_dsc->img_dsc; + lv_image_header_t header; + int image_handle = lv_nanovg_image_cache_get_handle(u, img_dsc->src, lv_color_to_32(img_dsc->recolor, + img_dsc->recolor_opa), 0, &header); + if(image_handle < 0) { + LV_PROFILER_DRAW_END; + return; + } + + float offset_x = 0; + float offset_y = 0; + + if(fill_dsc->fill_units == LV_VECTOR_FILL_UNITS_OBJECT_BOUNDING_BOX) { + offset_x = offset->x; + offset_y = offset->y; + } + + NVGpaint paint = nvgImagePattern(u->vg, offset_x, offset_y, header.w, header.h, 0, image_handle, + img_dsc->opa / (float)LV_OPA_COVER); + + nvgFillPaint(u->vg, paint); + nvgFill(u->vg); + } + break; + case LV_VECTOR_DRAW_STYLE_GRADIENT: { + lv_nanovg_draw_grad(u->vg, &fill_dsc->gradient, winding, comp_op); + } + break; + default: + LV_LOG_WARN("unsupported style: %d", fill_dsc->style); + break; + } + + LV_PROFILER_DRAW_END; +} + +static void draw_stroke(lv_draw_nanovg_unit_t * u, const lv_vector_stroke_dsc_t * stroke_dsc) +{ + LV_PROFILER_DRAW_BEGIN; + + lv_nanovg_transform(u->vg, &stroke_dsc->matrix); + + nvgStrokeColor(u->vg, lv_color32_to_nvg(stroke_dsc->color, stroke_dsc->opa)); + nvgStrokeWidth(u->vg, stroke_dsc->width); + + switch(stroke_dsc->style) { + case LV_VECTOR_DRAW_STYLE_SOLID: + break; + + case 
LV_VECTOR_DRAW_STYLE_GRADIENT: { + NVGpaint paint; + if(!lv_nanovg_grad_to_paint(u->vg, &stroke_dsc->gradient, &paint)) { + LV_PROFILER_DRAW_END; + return; + } + nvgStrokePaint(u->vg, paint); + } + break; + + default: + LV_LOG_WARN("unsupported style: %d", stroke_dsc->style); + break; + } + + nvgStroke(u->vg); + + LV_PROFILER_DRAW_END; +} + +static void task_draw_cb(void * ctx, const lv_vector_path_t * path, const lv_vector_path_ctx_t * dsc) +{ + LV_PROFILER_DRAW_BEGIN; + lv_draw_nanovg_unit_t * u = ctx; + + /* clear area */ + if(!path) { + NVGcolor c = lv_color32_to_nvg(dsc->fill_dsc.color, dsc->fill_dsc.opa); + nvgBeginPath(u->vg); + lv_nanovg_path_append_area(u->vg, &dsc->scissor_area); + lv_nanovg_fill(u->vg, NVG_CCW, NVG_COPY, c); + LV_PROFILER_DRAW_END; + return; + } + + if(dsc->fill_dsc.opa == LV_OPA_TRANSP && dsc->stroke_dsc.opa == LV_OPA_TRANSP) { + LV_LOG_TRACE("Full transparent, no need to draw"); + LV_PROFILER_DRAW_END; + return; + } + + nvgSave(u->vg); + lv_nanovg_transform(u->vg, &dsc->matrix); + + lv_fpoint_t offset = {0, 0}; + lv_path_to_nvg(u->vg, path, &offset); + + lv_nanovg_set_clip_area(u->vg, &dsc->scissor_area); + + const enum NVGcompositeOperation comp_op = lv_blend_to_nvg(dsc->blend_mode); + nvgGlobalCompositeOperation(u->vg, comp_op); + + if(dsc->fill_dsc.opa) { + draw_fill(u, &dsc->fill_dsc, &offset, comp_op); + } + + if(dsc->stroke_dsc.opa) { + draw_stroke(u, &dsc->stroke_dsc); + } + + nvgRestore(u->vg); + + LV_PROFILER_DRAW_END; +} + +static void lv_path_to_nvg(NVGcontext * ctx, const lv_vector_path_t * src, lv_fpoint_t * offset) +{ + LV_PROFILER_DRAW_BEGIN; + + float min_x = FLT_MAX; + float min_y = FLT_MAX; + float max_x = -FLT_MAX; + float max_y = -FLT_MAX; + +#define CMP_BOUNDS(point) \ + do { \ + if((point)->x < min_x) min_x = (point)->x; \ + if((point)->y < min_y) min_y = (point)->y; \ + if((point)->x > max_x) max_x = (point)->x; \ + if((point)->y > max_y) max_y = (point)->y; \ + } while(0) + + const lv_vector_path_op_t * ops = 
lv_array_front(&src->ops); + const lv_fpoint_t * point = lv_array_front(&src->points); + const uint32_t op_size = lv_array_size(&src->ops); + + nvgBeginPath(ctx); + + for(uint32_t i = 0; i < op_size; i++) { + switch(ops[i]) { + case LV_VECTOR_PATH_OP_MOVE_TO: { + nvgMoveTo(ctx, point->x, point->y); + CMP_BOUNDS(point); + point++; + } + break; + case LV_VECTOR_PATH_OP_LINE_TO: { + nvgLineTo(ctx, point->x, point->y); + CMP_BOUNDS(point); + point++; + } + break; + case LV_VECTOR_PATH_OP_QUAD_TO: { + nvgQuadTo(ctx, point[0].x, point[0].y, point[1].x, point[1].y); + CMP_BOUNDS(&point[0]); + CMP_BOUNDS(&point[1]); + point += 2; + } + break; + case LV_VECTOR_PATH_OP_CUBIC_TO: { + nvgBezierTo(ctx, point[0].x, point[0].y, point[1].x, point[1].y, point[2].x, point[2].y); + CMP_BOUNDS(&point[0]); + CMP_BOUNDS(&point[1]); + CMP_BOUNDS(&point[2]); + point += 3; + } + break; + case LV_VECTOR_PATH_OP_CLOSE: { + nvgClosePath(ctx); + } + break; + default: + LV_LOG_WARN("unknown op: %d", ops[i]); + break; + } + } + + offset->x = lroundf(min_x); + offset->y = lroundf(min_y); + LV_PROFILER_DRAW_END; +} + +static enum NVGcompositeOperation lv_blend_to_nvg(lv_vector_blend_t blend) +{ + switch(blend) { + case LV_VECTOR_BLEND_SRC_OVER: + return NVG_SOURCE_OVER; + case LV_VECTOR_BLEND_SRC_IN: + return NVG_SOURCE_IN; + case LV_VECTOR_BLEND_DST_OVER: + return NVG_DESTINATION_OVER; + case LV_VECTOR_BLEND_DST_IN: + return NVG_DESTINATION_IN; + case LV_VECTOR_BLEND_NONE: + return NVG_COPY; + default: + LV_LOG_INFO("Unknown supported blend mode: %d", blend); + return NVG_SOURCE_OVER; + } +} + +static enum NVGwinding lv_fill_to_nvg(lv_vector_fill_t fill_rule) +{ + switch(fill_rule) { + case LV_VECTOR_FILL_NONZERO: + return NVG_CCW; + case LV_VECTOR_FILL_EVENODD: + return NVG_CW; + default: + LV_LOG_WARN("Unknown supported fill rule: %d", fill_rule); + return NVG_CCW; + } +} + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_nanovg_fbo_cache.c 
b/src/draw/nanovg/lv_nanovg_fbo_cache.c new file mode 100644 index 0000000000..732e4e07ce --- /dev/null +++ b/src/draw/nanovg/lv_nanovg_fbo_cache.c @@ -0,0 +1,176 @@ +/** + * @file lv_nanovg_fbo_cache.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_nanovg_fbo_cache.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_draw_nanovg_private.h" +#include "lv_nanovg_utils.h" +#include "../../libs/nanovg/nanovg_gl_utils.h" + +/********************* + * DEFINES + *********************/ + +#define LV_NANOVG_FBO_CACHE_CNT 4 + +/********************** + * TYPEDEFS + **********************/ + +typedef struct { + /* context */ + lv_draw_nanovg_unit_t * u; + + /* key */ + int width; + int height; + int flags; + enum NVGtexture format; + + /* value */ + struct NVGLUframebuffer * fbo; +} fbo_item_t; + +/********************** + * STATIC PROTOTYPES + **********************/ + +static bool fbo_create_cb(fbo_item_t * item, void * user_data); +static void fbo_free_cb(fbo_item_t * item, void * user_data); +static lv_cache_compare_res_t fbo_compare_cb(const fbo_item_t * lhs, const fbo_item_t * rhs); + +/********************** + * STATIC VARIABLES + **********************/ + +/********************** + * MACROS + **********************/ + +/********************** + * GLOBAL FUNCTIONS + **********************/ + +void lv_nanovg_fbo_cache_init(lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_ASSERT(u->fbo_cache == NULL); + + const lv_cache_ops_t ops = { + .compare_cb = (lv_cache_compare_cb_t)fbo_compare_cb, + .create_cb = (lv_cache_create_cb_t)fbo_create_cb, + .free_cb = (lv_cache_free_cb_t)fbo_free_cb, + }; + + u->fbo_cache = lv_cache_create(&lv_cache_class_lru_ll_count, sizeof(fbo_item_t), LV_NANOVG_FBO_CACHE_CNT, ops); + lv_cache_set_name(u->fbo_cache, "NVG_FBO"); +} + +void lv_nanovg_fbo_cache_deinit(lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_ASSERT(u->fbo_cache); + + lv_cache_destroy(u->fbo_cache, NULL); + u->fbo_cache = NULL; +} + 
+struct _lv_cache_entry_t * lv_nanovg_fbo_cache_get(lv_draw_nanovg_unit_t * u, int width, int height, int flags, + int format) +{ + LV_PROFILER_DRAW_BEGIN; + LV_ASSERT_NULL(u); + + fbo_item_t search_key = { 0 }; + search_key.u = u; + search_key.width = width; + search_key.height = height; + search_key.flags = flags; + search_key.format = format; + + lv_cache_entry_t * cache_node_entry = lv_cache_acquire(u->fbo_cache, &search_key, NULL); + if(cache_node_entry == NULL) { + cache_node_entry = lv_cache_acquire_or_create(u->fbo_cache, &search_key, NULL); + if(cache_node_entry == NULL) { + LV_LOG_ERROR("FBO cache creating failed"); + LV_PROFILER_DRAW_END; + return NULL; + } + } + + LV_PROFILER_DRAW_END; + return cache_node_entry; +} + +void lv_nanovg_fbo_cache_release(struct _lv_draw_nanovg_unit_t * u, struct _lv_cache_entry_t * entry) +{ + LV_ASSERT_NULL(u); + LV_ASSERT_NULL(entry); + lv_cache_release(u->fbo_cache, entry, NULL); +} + +struct NVGLUframebuffer * lv_nanovg_fbo_cache_entry_to_fb(struct _lv_cache_entry_t * entry) +{ + LV_ASSERT_NULL(entry); + fbo_item_t * fbo_item = lv_cache_entry_get_data(entry); + return fbo_item->fbo; +} + +/********************** + * STATIC FUNCTIONS + **********************/ + +static bool fbo_create_cb(fbo_item_t * item, void * user_data) +{ + LV_PROFILER_DRAW_BEGIN; + LV_UNUSED(user_data); + + item->fbo = nvgluCreateFramebuffer(item->u->vg, item->width, item->height, item->flags, item->format); + if(!item->fbo) { + LV_LOG_ERROR("Failed to create FBO"); + } + + LV_PROFILER_DRAW_END; + return item->fbo != NULL; +} + +static void fbo_free_cb(fbo_item_t * item, void * user_data) +{ + LV_PROFILER_DRAW_BEGIN; + LV_UNUSED(user_data); + + nvgluDeleteFramebuffer(item->fbo); + + LV_PROFILER_DRAW_END; +} + +static lv_cache_compare_res_t fbo_compare_cb(const fbo_item_t * lhs, const fbo_item_t * rhs) +{ + if(lhs->width != rhs->width) { + return lhs->width > rhs->width ? 
1 : -1; + } + + if(lhs->height != rhs->height) { + return lhs->height > rhs->height ? 1 : -1; + } + + if(lhs->flags != rhs->flags) { + return lhs->flags > rhs->flags ? 1 : -1; + } + + if(lhs->format != rhs->format) { + return lhs->format > rhs->format ? 1 : -1; + } + + return 0; +} + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_nanovg_fbo_cache.h b/src/draw/nanovg/lv_nanovg_fbo_cache.h new file mode 100644 index 0000000000..df89a1aa0c --- /dev/null +++ b/src/draw/nanovg/lv_nanovg_fbo_cache.h @@ -0,0 +1,85 @@ +/** + * @file lv_nanovg_fbo_cache.h + * + */ + +#ifndef LV_NANOVG_FBO_CACHE_H +#define LV_NANOVG_FBO_CACHE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/********************* + * INCLUDES + *********************/ + +#include "../../lv_conf_internal.h" + +#if LV_USE_DRAW_NANOVG + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +struct _lv_draw_nanovg_unit_t; +struct _lv_cache_entry_t; +struct NVGLUframebuffer; + +/********************** + * GLOBAL PROTOTYPES + **********************/ + +/** + * @brief Initialize the FBO cache + * @param u pointer to the nanovg unit + */ +void lv_nanovg_fbo_cache_init(struct _lv_draw_nanovg_unit_t * u); + +/** + * @brief Deinitialize the FBO cache + * @param u pointer to the nanovg unit + */ +void lv_nanovg_fbo_cache_deinit(struct _lv_draw_nanovg_unit_t * u); + +/** + * @brief Get the FBO from the cache, create a new one if not found + * @param u pointer to the nanovg unit + * @param width the width of the FBO + * @param height the height of the FBO + * @param flags the FBO flags + * @param format the texture format + * @return the FBO cache entry, or NULL if not found + */ +struct _lv_cache_entry_t * lv_nanovg_fbo_cache_get(struct _lv_draw_nanovg_unit_t * u, int width, int height, int flags, + int format); + +/** + * @brief Release the FBO from the cache + * @param u pointer to the nanovg unit + * @param entry the FBO cache 
entry to release + */ +void lv_nanovg_fbo_cache_release(struct _lv_draw_nanovg_unit_t * u, struct _lv_cache_entry_t * entry); + +/** + * @brief Convert a cache entry to a framebuffer + * @param entry the FBO cache entry + * @return the framebuffer pointer + */ +struct NVGLUframebuffer * lv_nanovg_fbo_cache_entry_to_fb(struct _lv_cache_entry_t * entry); + +/********************** + * MACROS + **********************/ + +#endif /*LV_USE_DRAW_NANOVG*/ + +#ifdef __cplusplus +} /*extern "C"*/ +#endif + +#endif /*LV_NANOVG_FBO_CACHE_H*/ diff --git a/src/draw/nanovg/lv_nanovg_image_cache.c b/src/draw/nanovg/lv_nanovg_image_cache.c new file mode 100644 index 0000000000..b8e5718b17 --- /dev/null +++ b/src/draw/nanovg/lv_nanovg_image_cache.c @@ -0,0 +1,354 @@ +/** + * @file lv_nanovg_image_cache.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_nanovg_image_cache.h" + +#if LV_USE_DRAW_NANOVG + +#include "lv_draw_nanovg_private.h" +#include "lv_nanovg_utils.h" +#include "../lv_image_decoder_private.h" +#include "../../misc/lv_pending.h" +#include "../../misc/lv_iter.h" + +/********************* +* DEFINES +*********************/ + +/********************** +* TYPEDEFS +**********************/ + +typedef struct { + /* context */ + lv_draw_nanovg_unit_t * u; + + /* key */ + lv_draw_buf_t src_buf; + lv_color32_t color; + int image_flags; + + /* for drop search */ + const void * src; + lv_image_src_t src_type; + + /* value */ + int image_handle; +} image_item_t; + +/********************** +* STATIC PROTOTYPES +**********************/ + +static void image_cache_release_cb(void * entry, void * user_data); +static bool image_create_cb(image_item_t * item, void * user_data); +static void image_free_cb(image_item_t * item, void * user_data); +static lv_cache_compare_res_t image_compare_cb(const image_item_t * lhs, const image_item_t * rhs); +static void image_cache_drop_collect_cb(void * elem); + +/********************** +* STATIC VARIABLES 
+**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_nanovg_image_cache_init(struct _lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_ASSERT(u->image_cache == NULL); + LV_ASSERT(u->image_pending == NULL); + + const lv_cache_ops_t ops = { + .compare_cb = (lv_cache_compare_cb_t)image_compare_cb, + .create_cb = (lv_cache_create_cb_t)image_create_cb, + .free_cb = (lv_cache_free_cb_t)image_free_cb, + }; + + u->image_cache = lv_cache_create(&lv_cache_class_lru_rb_count, sizeof(image_item_t), LV_NANOVG_IMAGE_CACHE_CNT, ops); + lv_cache_set_name(u->image_cache, "NVG_IMAGE"); + u->image_pending = lv_pending_create(sizeof(lv_cache_entry_t *), 4); + lv_pending_set_free_cb(u->image_pending, image_cache_release_cb, u->image_cache); + + lv_ll_init(&u->image_drop_ll, sizeof(image_item_t)); +} + +void lv_nanovg_image_cache_deinit(struct _lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_ASSERT(u->image_cache); + LV_ASSERT(u->image_pending); + + lv_pending_destroy(u->image_pending); + u->image_pending = NULL; + + lv_cache_destroy(u->image_cache, NULL); + u->image_cache = NULL; +} + +int lv_nanovg_image_cache_get_handle(struct _lv_draw_nanovg_unit_t * u, + const void * src, + lv_color32_t color, + int image_flags, + lv_image_header_t * header) +{ + LV_PROFILER_DRAW_BEGIN; + + LV_ASSERT_NULL(u); + LV_ASSERT_NULL(src); + + lv_image_decoder_args_t args; + lv_memzero(&args, sizeof(lv_image_decoder_args_t)); + + lv_image_decoder_dsc_t decoder_dsc; + lv_result_t res = lv_image_decoder_open(&decoder_dsc, src, &args); + if(res != LV_RESULT_OK) { + lv_image_src_t type = lv_image_src_get_type(src); + LV_UNUSED(type); + LV_LOG_WARN("Failed to open image: type: %d, src: %p (%s)", type, src, + type == LV_IMAGE_SRC_FILE ? 
(const char *)src : "var"); + LV_PROFILER_DRAW_END; + return -1; + } + + const lv_draw_buf_t * decoded = decoder_dsc.decoded; + if(decoded == NULL || decoded->data == NULL) { + lv_image_decoder_close(&decoder_dsc); + LV_LOG_ERROR("image data is NULL"); + LV_PROFILER_DRAW_END; + return -1; + } + + if(header) { + *header = decoder_dsc.header; + } + + image_item_t search_key = { 0 }; + search_key.u = u; + search_key.src_buf = *decoded; + search_key.color = color; + search_key.image_flags = image_flags; + search_key.src = src; + search_key.src_type = lv_image_src_get_type(src); + + lv_cache_entry_t * cache_node_entry = lv_cache_acquire(u->image_cache, &search_key, NULL); + if(cache_node_entry == NULL) { + /* check if the cache is full */ + size_t free_size = lv_cache_get_free_size(u->image_cache, NULL); + if(free_size == 0) { + LV_LOG_INFO("image cache is full, release all pending cache entries"); + lv_nanovg_end_frame(u); + } + + cache_node_entry = lv_cache_acquire_or_create(u->image_cache, &search_key, NULL); + if(cache_node_entry == NULL) { + LV_LOG_ERROR("image cache creating failed"); + lv_image_decoder_close(&decoder_dsc); + LV_PROFILER_DRAW_END; + return -1; + } + } + + lv_image_decoder_close(&decoder_dsc); + + /* Add the new entry to the pending list */ + lv_pending_add(u->image_pending, &cache_node_entry); + + image_item_t * image_item = lv_cache_entry_get_data(cache_node_entry); + + LV_PROFILER_DRAW_END; + return image_item->image_handle; +} + +void lv_nanovg_image_cache_drop(struct _lv_draw_nanovg_unit_t * u, const void * src) +{ + LV_ASSERT_NULL(u); + LV_UNUSED(src); + if(src == NULL) { + lv_cache_drop_all(u->image_cache, NULL); + return; + } + + u->image_drop_src = src; + + lv_iter_t * iter = lv_cache_iter_create(u->image_cache); + LV_ASSERT_NULL(iter); + + /* Collect all cache entries that match the drop source */ + lv_iter_inspect(iter, image_cache_drop_collect_cb); + + image_item_t * drop_item; + LV_LL_READ(&u->image_drop_ll, drop_item) { + 
lv_cache_drop(u->image_cache, drop_item, NULL); + } + + lv_ll_clear(&u->image_drop_ll); + lv_iter_destroy(iter); + u->image_drop_src = NULL; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +static void image_cache_release_cb(void * entry, void * user_data) +{ + lv_cache_entry_t ** entry_p = entry; + lv_cache_t * cache = user_data; + lv_cache_release(cache, * entry_p, NULL); +} + +static bool image_create_cb(image_item_t * item, void * user_data) +{ + LV_UNUSED(user_data); + const uint32_t w = item->src_buf.header.w; + const uint32_t h = item->src_buf.header.h; + const lv_color_format_t cf = item->src_buf.header.cf; + const uint32_t stride = item->src_buf.header.stride; + enum NVGtexture nvg_tex_type = NVG_TEXTURE_BGRA; + + /* Determine texture type and pixel size based on color format */ + switch(cf) { + case LV_COLOR_FORMAT_A8: + nvg_tex_type = NVG_TEXTURE_ALPHA; + break; + + case LV_COLOR_FORMAT_ARGB8888: + case LV_COLOR_FORMAT_ARGB8888_PREMULTIPLIED: + nvg_tex_type = NVG_TEXTURE_BGRA; + break; + + case LV_COLOR_FORMAT_XRGB8888: + nvg_tex_type = NVG_TEXTURE_BGRX; + break; + + case LV_COLOR_FORMAT_RGB888: + nvg_tex_type = NVG_TEXTURE_BGR; + break; + + case LV_COLOR_FORMAT_RGB565: + nvg_tex_type = NVG_TEXTURE_RGB565; + break; + + default: + LV_LOG_ERROR("Unsupported image format: %d", cf); + return false; + } + + void * data = NULL; + + /* Check if stride is tightly packed */ + uint32_t tight_stride = (w * lv_color_format_get_bpp(cf) + 7) >> 3; + if(stride == tight_stride) { + /* Stride matches, use source buffer directly (zero-copy) */ + data = lv_draw_buf_goto_xy(&item->src_buf, 0, 0); + LV_LOG_TRACE("Image stride matches: %" LV_PRIu32, stride); + } + else { + /* Stride doesn't match, need to copy with tight alignment */ + lv_draw_buf_t * tmp_buf = lv_nanovg_reshape_global_image(item->u, cf, w, h); + if(!tmp_buf) { + LV_LOG_ERROR("Failed to allocate temp buffer for stride conversion"); + return false; + } + + lv_draw_buf_copy(tmp_buf, 
NULL, &item->src_buf, NULL); + data = lv_draw_buf_goto_xy(tmp_buf, 0, 0); + LV_LOG_TRACE("Image stride converted: %" LV_PRIu32 " -> %" LV_PRIu32, stride, tight_stride); + } + + int flags = item->image_flags; + if(cf == LV_COLOR_FORMAT_ARGB8888_PREMULTIPLIED + || lv_draw_buf_has_flag(&item->src_buf, LV_IMAGE_FLAGS_PREMULTIPLIED)) { + flags |= NVG_IMAGE_PREMULTIPLIED; + } + + LV_PROFILER_DRAW_BEGIN_TAG("nvgCreateImage"); + int image_handle = nvgCreateImage(item->u->vg, w, h, flags, nvg_tex_type, data); + LV_PROFILER_DRAW_END_TAG("nvgCreateImage"); + + if(image_handle < 0) { + return false; + } + + if(item->src_type == LV_IMAGE_SRC_FILE) { + item->src = lv_strdup(item->src); + LV_ASSERT_MALLOC(item->src); + } + + item->image_handle = image_handle; + return true; +} + +static void image_free_cb(image_item_t * item, void * user_data) +{ + LV_UNUSED(user_data); + LV_PROFILER_DRAW_BEGIN; + LV_LOG_TRACE("image_handle: %d", item->image_handle); + nvgDeleteImage(item->u->vg, item->image_handle); + item->image_handle = -1; + + if(item->src_type == LV_IMAGE_SRC_FILE) { + lv_free((void *)item->src); + item->src = NULL; + } + + item->src_type = LV_IMAGE_SRC_UNKNOWN; + + LV_PROFILER_DRAW_END; +} + +static lv_cache_compare_res_t image_compare_cb(const image_item_t * lhs, const image_item_t * rhs) +{ + if(lhs->image_flags != rhs->image_flags) { + return lhs->image_flags > rhs->image_flags ? 1 : -1; + } + + uint32_t lhs_color = *(uint32_t *)&lhs->color; + uint32_t rhs_color = *(uint32_t *)&rhs->color; + + if(lhs_color != rhs_color) { + return lhs_color > rhs_color ? 1 : -1; + } + + int cmp_res = lv_memcmp(&lhs->src_buf, &rhs->src_buf, sizeof(lv_draw_buf_t)); + if(cmp_res != 0) { + return cmp_res > 0 ? 1 : -1; + } + + return 0; +} + +static void image_cache_drop_collect_cb(void * elem) +{ + /** + * If the cache is deleted during the traversal process, + * it will cause iter to become invalid. + * Therefore, we will first add it to the drop collection list and postpone the deletion. 
+ */ + LV_ASSERT_NULL(elem); + image_item_t * item = elem; + const void * src = item->u->image_drop_src; + LV_ASSERT_NULL(src); + lv_image_src_t src_type = lv_image_src_get_type(src); + + if((src_type == LV_IMAGE_SRC_FILE && lv_strcmp(item->src, src) == 0) + || (src_type == LV_IMAGE_SRC_VARIABLE && item->src == src)) { + image_item_t * drop_item = lv_ll_ins_tail(&item->u->image_drop_ll); + LV_ASSERT_MALLOC(drop_item); + *drop_item = *item; + } +} + +#endif /*LV_USE_DRAW_NANOVG*/ diff --git a/src/draw/nanovg/lv_nanovg_image_cache.h b/src/draw/nanovg/lv_nanovg_image_cache.h new file mode 100644 index 0000000000..aa7a0fd026 --- /dev/null +++ b/src/draw/nanovg/lv_nanovg_image_cache.h @@ -0,0 +1,81 @@ +/** + * @file lv_nanovg_image_cache.h + * + */ + +#ifndef LV_NANOVG_IMAGE_CACHE_H +#define LV_NANOVG_IMAGE_CACHE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/********************* + * INCLUDES + *********************/ + +#include "../../lv_conf_internal.h" + +#if LV_USE_DRAW_NANOVG + +#include "../lv_draw_image_private.h" + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +struct _lv_draw_nanovg_unit_t; + +/********************** + * GLOBAL PROTOTYPES + **********************/ + +/** + * @brief Initialize the image cache + * @param u pointer to the nanovg unit + */ +void lv_nanovg_image_cache_init(struct _lv_draw_nanovg_unit_t * u); + +/** + * @brief Deinitialize the image cache + * @param u pointer to the nanovg unit + */ +void lv_nanovg_image_cache_deinit(struct _lv_draw_nanovg_unit_t * u); + +/** + * @brief Get the image handle from the cache, create a new one if not found + * @param u pointer to the nanovg unit + * @param src the source image data + * @param color the color to apply + * @param image_flags the image flags + * @param header the image header to fill (can be NULL) + * @return the image handle, or -1 on failure + */ +int lv_nanovg_image_cache_get_handle(struct 
_lv_draw_nanovg_unit_t * u, + const void * src, + lv_color32_t color, + int image_flags, + lv_image_header_t * header); + +/** + * @brief Drop the image from the cache + * @param u pointer to the nanovg unit + * @param src the source image data + */ +void lv_nanovg_image_cache_drop(struct _lv_draw_nanovg_unit_t * u, const void * src); + +/********************** + * MACROS + **********************/ + +#endif /*LV_USE_DRAW_NANOVG*/ + +#ifdef __cplusplus +} /*extern "C"*/ +#endif + +#endif /*LV_NANOVG_IMAGE_CACHE_H*/ diff --git a/src/draw/nanovg/lv_nanovg_math.h b/src/draw/nanovg/lv_nanovg_math.h new file mode 100644 index 0000000000..44d6f02b9f --- /dev/null +++ b/src/draw/nanovg/lv_nanovg_math.h @@ -0,0 +1,102 @@ +/** + * @file lv_nanovg_math.h + * + */ + +#ifndef LV_NANOVG_MATH_H +#define LV_NANOVG_MATH_H + +#ifdef __cplusplus +extern "C" { +#endif + +/********************* + * INCLUDES + *********************/ + +#include "../../lv_conf_internal.h" + +#if LV_USE_DRAW_NANOVG + +#include +#include +#include + +/********************* + * DEFINES + *********************/ + +#define NVG_MATH_PI 3.14159265358979323846f +#define NVG_MATH_HALF_PI 1.57079632679489661923f +#define NVG_MATH_TWO_PI 6.28318530717958647692f +#define NVG_DEG_TO_RAD 0.017453292519943295769236907684886f +#define NVG_RAD_TO_DEG 57.295779513082320876798154814105f + +#define NVG_MATH_TANF(x) tanf(x) +#define NVG_MATH_SINF(x) sinf(x) +#define NVG_MATH_COSF(x) cosf(x) +#define NVG_MATH_ASINF(x) asinf(x) +#define NVG_MATH_ACOSF(x) acosf(x) +#define NVG_MATH_FABSF(x) fabsf(x) +#define NVG_MATH_SQRTF(x) sqrtf(x) + +#define NVG_MATH_RADIANS(deg) ((deg) * NVG_DEG_TO_RAD) +#define NVG_MATH_DEGREES(rad) ((rad) * NVG_RAD_TO_DEG) + +/********************** + * TYPEDEFS + **********************/ + +/********************** + * GLOBAL PROTOTYPES + **********************/ + +/** + * Check if the floating point number is zero + * @param a the number to check + * @return true if the number is zero, false otherwise + 
*/ +static inline bool nvg_math_is_zero(float a) +{ + return (NVG_MATH_FABSF(a) < FLT_EPSILON); +} + +/** + * Check if two floating point numbers are equal + * @param a the first number + * @param b the second number + * @return true if the numbers are equal, false otherwise + */ +static inline bool nvg_math_is_equal(float a, float b) +{ + return nvg_math_is_zero(a - b); +} + +/** + * Calculate the inverse square root (1/sqrt(x)) + * @param number the input number + * @return the inverse square root + */ +static inline float nvg_math_inv_sqrtf(float number) +{ + /* From https://en.wikipedia.org/wiki/Fast_inverse_square_root#Avoiding_undefined_behavior */ + union { + float f; + int32_t i; + } conv = { .f = number }; + conv.i = 0x5f3759df - (conv.i >> 1); + conv.f *= 1.5F - (number * 0.5F * conv.f * conv.f); + return conv.f; +} + +/********************** + * MACROS + **********************/ + +#endif /*LV_USE_DRAW_NANOVG*/ + +#ifdef __cplusplus +} /*extern "C"*/ +#endif + +#endif /*LV_NANOVG_MATH_H*/ diff --git a/src/draw/nanovg/lv_nanovg_utils.c b/src/draw/nanovg/lv_nanovg_utils.c new file mode 100644 index 0000000000..c31a96f506 --- /dev/null +++ b/src/draw/nanovg/lv_nanovg_utils.c @@ -0,0 +1,301 @@ +/** + * @file lv_nanovg_utils.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_nanovg_utils.h" + +#if LV_USE_DRAW_NANOVG + +#include "../../misc/lv_pending.h" +#include "lv_draw_nanovg_private.h" +#include "lv_nanovg_math.h" +#include +#include + +/********************* +* DEFINES +*********************/ + +/* Magic number from https://spencermortensen.com/articles/bezier-circle/ */ +#define PATH_ARC_MAGIC 0.55191502449351f + +#define SIGN(x) (nvg_math_is_zero(x) ? 0 : ((x) > 0 ? 
1 : -1)) + +/********************** +* TYPEDEFS +**********************/ + +/********************** +* STATIC PROTOTYPES +**********************/ + +/********************** +* STATIC VARIABLES +**********************/ + +/********************** +* MACROS +**********************/ + +/********************** +* GLOBAL FUNCTIONS +**********************/ + +void lv_nanovg_utils_init(struct _lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); +} + +void lv_nanovg_utils_deinit(struct _lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + + if(u->image_buf) { + lv_draw_buf_destroy(u->image_buf); + u->image_buf = NULL; + } +} + +void lv_nanovg_transform(NVGcontext * ctx, const lv_matrix_t * matrix) +{ + LV_ASSERT_NULL(ctx); + LV_ASSERT_NULL(matrix); + LV_PROFILER_DRAW_BEGIN; + + nvgTransform(ctx, + matrix->m[0][0], + matrix->m[1][0], + matrix->m[0][1], + matrix->m[1][1], + matrix->m[0][2], + matrix->m[1][2]); + + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_set_clip_area(NVGcontext * ctx, const lv_area_t * area) +{ + LV_ASSERT_NULL(ctx); + LV_ASSERT_NULL(area); + LV_PROFILER_DRAW_BEGIN; + + nvgScissor(ctx, + area->x1, area->y1, + lv_area_get_width(area), lv_area_get_height(area)); + + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_path_append_rect(NVGcontext * ctx, float x, float y, float w, float h, float r) +{ + LV_ASSERT_NULL(ctx); + + LV_PROFILER_DRAW_BEGIN; + + if(r > 0) { + const float half_w = w / 2.0f; + const float half_h = h / 2.0f; + + /*clamping cornerRadius by minimum size*/ + const float r_max = LV_MIN(half_w, half_h); + + nvgRoundedRect(ctx, x, y, w, h, r > r_max ? 
r_max : r); + LV_PROFILER_DRAW_END; + return; + } + + nvgRect(ctx, x, y, w, h); + + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_path_append_area(NVGcontext * ctx, const lv_area_t * area) +{ + LV_ASSERT_NULL(ctx); + LV_ASSERT_NULL(area); + LV_PROFILER_DRAW_BEGIN; + + nvgRect(ctx, area->x1, area->y1, lv_area_get_width(area), lv_area_get_height(area)); + + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_path_append_arc_right_angle(NVGcontext * ctx, + float start_x, float start_y, + float center_x, float center_y, + float end_x, float end_y) +{ + LV_PROFILER_DRAW_BEGIN; + float dx1 = center_x - start_x; + float dy1 = center_y - start_y; + float dx2 = end_x - center_x; + float dy2 = end_y - center_y; + + float c = SIGN(dx1 * dy2 - dx2 * dy1) * PATH_ARC_MAGIC; + + nvgBezierTo(ctx, + start_x - c * dy1, start_y + c * dx1, + end_x - c * dy2, end_y + c * dx2, + end_x, end_y); + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_path_append_arc(NVGcontext * ctx, + float cx, float cy, + float radius, + float start_angle, + float sweep, + bool pie) +{ + LV_PROFILER_DRAW_BEGIN; + + if(radius <= 0) { + LV_PROFILER_DRAW_END; + return; + } + + /* just circle */ + if(sweep >= 360.0f || sweep <= -360.0f) { + nvgCircle(ctx, cx, cy, radius); + LV_PROFILER_DRAW_END; + return; + } + + start_angle = NVG_MATH_RADIANS(start_angle); + sweep = NVG_MATH_RADIANS(sweep); + + int n_curves = (int)ceil(NVG_MATH_FABSF(sweep / NVG_MATH_HALF_PI)); + float sweep_sign = sweep < 0 ? -1.f : 1.f; + float fract = fmodf(sweep, NVG_MATH_HALF_PI); + fract = (nvg_math_is_zero(fract)) ? NVG_MATH_HALF_PI * sweep_sign : fract; + + /* Start from here */ + float start_x = radius * NVG_MATH_COSF(start_angle); + float start_y = radius * NVG_MATH_SINF(start_angle); + + if(pie) { + nvgMoveTo(ctx, cx, cy); + nvgLineTo(ctx, start_x + cx, start_y + cy); + } + + for(int i = 0; i < n_curves; ++i) { + float end_angle = start_angle + ((i != n_curves - 1) ? 
NVG_MATH_HALF_PI * sweep_sign : fract); + float end_x = radius * NVG_MATH_COSF(end_angle); + float end_y = radius * NVG_MATH_SINF(end_angle); + + /* variables needed to calculate bezier control points */ + + /** get bezier control points using article: + * (http://itc.ktu.lt/index.php/ITC/article/view/11812/6479) + */ + float ax = start_x; + float ay = start_y; + float bx = end_x; + float by = end_y; + float q1 = ax * ax + ay * ay; + float q2 = ax * bx + ay * by + q1; + float k2 = (4.0f / 3.0f) * ((NVG_MATH_SQRTF(2 * q1 * q2) - q2) / (ax * by - ay * bx)); + + /* Next start point is the current end point */ + start_x = end_x; + start_y = end_y; + + end_x += cx; + end_y += cy; + + float ctrl1_x = ax - k2 * ay + cx; + float ctrl1_y = ay + k2 * ax + cy; + float ctrl2_x = bx + k2 * by + cx; + float ctrl2_y = by - k2 * bx + cy; + + nvgBezierTo(ctx, ctrl1_x, ctrl1_y, ctrl2_x, ctrl2_y, end_x, end_y); + start_angle = end_angle; + } + + if(pie) { + nvgClosePath(ctx); + } + + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_fill(NVGcontext * ctx, enum NVGwinding winding, enum NVGcompositeOperation composite_operation, + NVGcolor color) +{ + LV_ASSERT_NULL(ctx); + LV_PROFILER_DRAW_BEGIN; + nvgPathWinding(ctx, winding); + nvgGlobalCompositeOperation(ctx, composite_operation); + nvgFillColor(ctx, color); + nvgFill(ctx); + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_end_frame(struct _lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_PROFILER_DRAW_BEGIN; + + if(!u->is_started) { + LV_PROFILER_DRAW_END; + return; + } + + LV_PROFILER_DRAW_BEGIN_TAG("nvgEndFrame"); + nvgEndFrame(u->vg); + LV_PROFILER_DRAW_END_TAG("nvgEndFrame"); + + lv_nanovg_clean_up(u); + + LV_PROFILER_DRAW_END; +} + +void lv_nanovg_clean_up(struct _lv_draw_nanovg_unit_t * u) +{ + LV_ASSERT_NULL(u); + LV_PROFILER_DRAW_BEGIN; + + lv_pending_remove_all(u->image_pending); + lv_pending_remove_all(u->letter_pending); + u->is_started = false; + + LV_PROFILER_DRAW_END; +} + +lv_draw_buf_t * 
lv_nanovg_reshape_global_image(struct _lv_draw_nanovg_unit_t * u, + lv_color_format_t cf, + uint32_t w, + uint32_t h) +{ + LV_ASSERT_NULL(u); + + uint32_t stride = (w * lv_color_format_get_bpp(cf) + 7) >> 3; + lv_draw_buf_t * tmp_buf = lv_draw_buf_reshape(u->image_buf, cf, w, h, stride); + + if(!tmp_buf) { + if(u->image_buf) { + lv_draw_buf_destroy(u->image_buf); + u->image_buf = NULL; + } + + tmp_buf = lv_draw_buf_create(w, h, cf, stride); + if(!tmp_buf) { + return NULL; + } + } + + u->image_buf = tmp_buf; + + return u->image_buf; +} + +/********************** +* STATIC FUNCTIONS +**********************/ + +#endif /* LV_USE_DRAW_NANOVG */ diff --git a/src/draw/nanovg/lv_nanovg_utils.h b/src/draw/nanovg/lv_nanovg_utils.h new file mode 100644 index 0000000000..d1cb305aca --- /dev/null +++ b/src/draw/nanovg/lv_nanovg_utils.h @@ -0,0 +1,189 @@ +/** + * @file lv_nanovg_utils.h + * + */ + +#ifndef LV_NANOVG_UTILS_H +#define LV_NANOVG_UTILS_H + +#ifdef __cplusplus +extern "C" { +#endif + +/********************* + * INCLUDES + *********************/ + +#include "../../lv_conf_internal.h" + +#if LV_USE_DRAW_NANOVG + +#include "../../misc/lv_assert.h" +#include "../../misc/lv_matrix.h" +#include "../../misc/lv_color.h" +#include "../../libs/nanovg/nanovg.h" + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +struct _lv_draw_nanovg_unit_t; + +/********************** + * GLOBAL PROTOTYPES + **********************/ + +/** + * Initialize NanoVG utilities + * @param u pointer to the nanovg unit + */ +void lv_nanovg_utils_init(struct _lv_draw_nanovg_unit_t * u); + +/** + * Deinitialize NanoVG utilities + * @param u pointer to the nanovg unit + */ +void lv_nanovg_utils_deinit(struct _lv_draw_nanovg_unit_t * u); + +/** + * Convert an LVGL matrix to a NanoVG transform (3x2 matrix) + * @param xform the NanoVG transform array (6 floats) + * @param matrix the LVGL matrix + */ +static inline void 
lv_nanovg_matrix_convert(float * xform, const lv_matrix_t * matrix) +{ + LV_ASSERT_NULL(xform); + LV_ASSERT_NULL(matrix); + xform[0] = matrix->m[0][0]; + xform[1] = matrix->m[1][0]; + xform[2] = matrix->m[0][1]; + xform[3] = matrix->m[1][1]; + xform[4] = matrix->m[0][2]; + xform[5] = matrix->m[1][2]; +} + +/** + * Convert an LVGL color to a NanoVG color + * @param color the LVGL color + * @param opa the opacity + * @return the NanoVG color + */ +static inline NVGcolor lv_nanovg_color_convert(lv_color_t color, lv_opa_t opa) +{ + return nvgRGBA(color.red, color.green, color.blue, opa); +} + +/** + * Apply a transform matrix to the NanoVG context + * @param ctx the NanoVG context + * @param matrix the transform matrix + */ +void lv_nanovg_transform(NVGcontext * ctx, const lv_matrix_t * matrix); + +/** + * Set the clipping area + * @param ctx the NanoVG context + * @param area the clipping area + */ +void lv_nanovg_set_clip_area(NVGcontext * ctx, const lv_area_t * area); + +/** + * Append a rectangle to the path + * @param ctx the NanoVG context + * @param x the x coordinate of the rectangle + * @param y the y coordinate of the rectangle + * @param w the width of the rectangle + * @param h the height of the rectangle + * @param r the radius of the rectangle (0 for no rounding) + */ +void lv_nanovg_path_append_rect(NVGcontext * ctx, float x, float y, float w, float h, float r); + +/** + * Append an area to the path + * @param ctx the NanoVG context + * @param area the area + */ +void lv_nanovg_path_append_area(NVGcontext * ctx, const lv_area_t * area); + +/** + * Append a right angle arc to the path + * @param ctx the NanoVG context + * @param start_x the starting x coordinate + * @param start_y the starting y coordinate + * @param center_x the center x coordinate + * @param center_y the center y coordinate + * @param end_x the ending x coordinate + * @param end_y the ending y coordinate + */ +void lv_nanovg_path_append_arc_right_angle(NVGcontext * ctx, + float start_x, 
float start_y, + float center_x, float center_y, + float end_x, float end_y); + +/** + * Append an arc to the path + * @param ctx the NanoVG context + * @param cx the center x coordinate + * @param cy the center y coordinate + * @param radius the radius + * @param start_angle the starting angle in degrees + * @param sweep the sweep angle in degrees + * @param pie whether to draw a pie slice (connected to center) + */ +void lv_nanovg_path_append_arc(NVGcontext * ctx, + float cx, float cy, + float radius, + float start_angle, + float sweep, + bool pie); + +/** + * Fill the current path + * @param ctx the NanoVG context + * @param winding the winding rule + * @param composite_operation the blend mode + * @param color the fill color + */ +void lv_nanovg_fill(NVGcontext * ctx, enum NVGwinding winding, enum NVGcompositeOperation composite_operation, + NVGcolor color); + +/** + * End the current frame + * @param u pointer to the nanovg unit + */ +void lv_nanovg_end_frame(struct _lv_draw_nanovg_unit_t * u); + +/** + * Clean up the NanoVG unit (e.g. 
at the end of task) + * @param u pointer to the nanovg unit + */ +void lv_nanovg_clean_up(struct _lv_draw_nanovg_unit_t * u); + +/** + * Reshape the global image buffer + * @param u pointer to the nanovg unit + * @param cf the color format + * @param w the new width + * @param h the new height + * @return pointer to the resized draw buffer + */ +lv_draw_buf_t * lv_nanovg_reshape_global_image(struct _lv_draw_nanovg_unit_t * u, + lv_color_format_t cf, + uint32_t w, + uint32_t h); + +/********************** + * MACROS + **********************/ + +#endif /* LV_USE_DRAW_NANOVG */ + +#ifdef __cplusplus +} /*extern "C"*/ +#endif + +#endif /*LV_NANOVG_UTILS_H*/ diff --git a/src/draw/snapshot/lv_snapshot.c b/src/draw/snapshot/lv_snapshot.c index 6885f2ed1e..129a6ae5db 100644 --- a/src/draw/snapshot/lv_snapshot.c +++ b/src/draw/snapshot/lv_snapshot.c @@ -125,6 +125,8 @@ lv_result_t lv_snapshot_take_to_draw_buf(lv_obj_t * obj, lv_color_format_t cf, l layer._clip_area = snapshot_area; layer.phy_clip_area = snapshot_area; + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_CREATED, &layer); + lv_display_t * disp_old = lv_refr_get_disp_refreshing(); lv_display_t * disp_new = lv_obj_get_display(obj); lv_layer_t * layer_old = disp_new->layer_head; @@ -178,6 +180,9 @@ lv_result_t lv_snapshot_take_to_draw_buf(lv_obj_t * obj, lv_color_format_t cf, l disp_new->layer_head = layer_old; lv_refr_set_disp_refreshing(disp_old); + lv_draw_unit_send_event(NULL, LV_EVENT_SCREEN_LOAD_START, &layer); + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_DELETED, &layer); + return LV_RESULT_OK; } diff --git a/src/libs/nanovg/LICENSE.txt b/src/libs/nanovg/LICENSE.txt new file mode 100644 index 0000000000..2a03a1a61e --- /dev/null +++ b/src/libs/nanovg/LICENSE.txt @@ -0,0 +1,18 @@ +Copyright (c) 2013 Mikko Mononen memon@inside.org + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. 
+ +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not +claim that you wrote the original software. If you use this software +in a product, an acknowledgment in the product documentation would be +appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not be +misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. + diff --git a/src/libs/nanovg/nanovg.c b/src/libs/nanovg/nanovg.c new file mode 100644 index 0000000000..9e82e23b93 --- /dev/null +++ b/src/libs/nanovg/nanovg.c @@ -0,0 +1,2577 @@ +// +// Copyright (c) 2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
+// + +#include "../../lv_conf_internal.h" + +#if LV_USE_NANOVG + +#include <math.h> +#include "../../stdlib/lv_mem.h" +#include "../../stdlib/lv_string.h" +#include "../../misc/lv_log.h" + +#include "nanovg.h" + +#ifdef _MSC_VER + #pragma warning(disable: 4100) // unreferenced formal parameter + #pragma warning(disable: 4127) // conditional expression is constant + #pragma warning(disable: 4204) // nonstandard extension used : non-constant aggregate initializer + #pragma warning(disable: 4706) // assignment within conditional expression +#endif + +#define NVG_INIT_FONTIMAGE_SIZE 512 +#define NVG_MAX_FONTIMAGE_SIZE 2048 +#define NVG_MAX_FONTIMAGES 4 + +#define NVG_INIT_COMMANDS_SIZE 256 +#define NVG_INIT_POINTS_SIZE 128 +#define NVG_INIT_PATHS_SIZE 16 +#define NVG_INIT_VERTS_SIZE 256 + +#ifndef NVG_MAX_STATES + #define NVG_MAX_STATES 32 +#endif + +#define NVG_KAPPA90 0.5522847493f // Length proportional to radius of a cubic bezier handle for 90deg arcs. + +#define NVG_COUNTOF(arr) (sizeof(arr) / sizeof(0[arr])) + +/** + * This value determines the maximum permissible pixel error when a Bézier curve is subdivided into line segments; + * a smaller value results in smoother lines but also more vertices. 
+ */ +#define NVG_TESS_TOL_FACTOR 0.5f + +enum NVGcommands { + NVG_MOVETO = 0, + NVG_LINETO = 1, + NVG_BEZIERTO = 2, + NVG_CLOSE = 3, + NVG_WINDING = 4, +}; + +enum NVGpointFlags { + NVG_PT_CORNER = 0x01, + NVG_PT_LEFT = 0x02, + NVG_PT_BEVEL = 0x04, + NVG_PR_INNERBEVEL = 0x08, +}; + +struct NVGstate { + NVGcompositeOperationState compositeOperation; + int shapeAntiAlias; + NVGpaint fill; + NVGpaint stroke; + float strokeWidth; + float miterLimit; + int lineJoin; + int lineCap; + float alpha; + float xform[6]; + NVGscissor scissor; + float fontSize; + float letterSpacing; + float lineHeight; + float fontBlur; + int textAlign; + int fontId; +}; +typedef struct NVGstate NVGstate; + +struct NVGpoint { + float x, y; + float dx, dy; + float len; + float dmx, dmy; + unsigned char flags; +}; +typedef struct NVGpoint NVGpoint; + +struct NVGpathCache { + NVGpoint * points; + int npoints; + int cpoints; + NVGpath * paths; + int npaths; + int cpaths; + NVGvertex * verts; + int nverts; + int cverts; + float bounds[4]; +}; +typedef struct NVGpathCache NVGpathCache; + +struct NVGcontext { + NVGparams params; + float * commands; + int ccommands; + int ncommands; + float commandx, commandy; + NVGstate states[NVG_MAX_STATES]; + int nstates; + NVGpathCache * cache; + float tessTol; + float distTol; + float fringeWidth; + float devicePxRatio; + struct FONScontext * fs; + int fontImages[NVG_MAX_FONTIMAGES]; + int fontImageIdx; + int drawCallCount; + int fillTriCount; + int strokeTriCount; + int textTriCount; +}; + +static float nvg__sqrtf(float a) +{ + return sqrtf(a); +} +static float nvg__modf(float a, float b) +{ + return fmodf(a, b); +} +static float nvg__sinf(float a) +{ + return sinf(a); +} +static float nvg__cosf(float a) +{ + return cosf(a); +} +static float nvg__tanf(float a) +{ + return tanf(a); +} +static float nvg__atan2f(float a, float b) +{ + return atan2f(a, b); +} +static float nvg__acosf(float a) +{ + return acosf(a); +} + +static int nvg__mini(int a, int b) +{ + 
return a < b ? a : b; +} +static int nvg__maxi(int a, int b) +{ + return a > b ? a : b; +} +static int nvg__clampi(int a, int mn, int mx) +{ + return a < mn ? mn : (a > mx ? mx : a); +} +static float nvg__minf(float a, float b) +{ + return a < b ? a : b; +} +static float nvg__maxf(float a, float b) +{ + return a > b ? a : b; +} +static float nvg__absf(float a) +{ + return a >= 0.0f ? a : -a; +} +static float nvg__signf(float a) +{ + return a >= 0.0f ? 1.0f : -1.0f; +} +static float nvg__clampf(float a, float mn, float mx) +{ + return a < mn ? mn : (a > mx ? mx : a); +} +static float nvg__cross(float dx0, float dy0, float dx1, float dy1) +{ + return dx1 * dy0 - dx0 * dy1; +} + +static float nvg__normalize(float * x, float * y) +{ + float d = nvg__sqrtf((*x) * (*x) + (*y) * (*y)); + if(d > 1e-6f) { + float id = 1.0f / d; + *x *= id; + *y *= id; + } + return d; +} + + +static void nvg__deletePathCache(NVGpathCache * c) +{ + if(c == NULL) return; + if(c->points != NULL) lv_free(c->points); + if(c->paths != NULL) lv_free(c->paths); + if(c->verts != NULL) lv_free(c->verts); + lv_free(c); +} + +static NVGpathCache * nvg__allocPathCache(void) +{ + NVGpathCache * c = (NVGpathCache *)lv_malloc(sizeof(NVGpathCache)); + if(c == NULL) goto error; + lv_memzero(c, sizeof(NVGpathCache)); + + c->points = (NVGpoint *)lv_malloc(sizeof(NVGpoint) * NVG_INIT_POINTS_SIZE); + if(!c->points) goto error; + c->npoints = 0; + c->cpoints = NVG_INIT_POINTS_SIZE; + + c->paths = (NVGpath *)lv_malloc(sizeof(NVGpath) * NVG_INIT_PATHS_SIZE); + if(!c->paths) goto error; + c->npaths = 0; + c->cpaths = NVG_INIT_PATHS_SIZE; + + c->verts = (NVGvertex *)lv_malloc(sizeof(NVGvertex) * NVG_INIT_VERTS_SIZE); + if(!c->verts) goto error; + c->nverts = 0; + c->cverts = NVG_INIT_VERTS_SIZE; + + return c; +error: + nvg__deletePathCache(c); + return NULL; +} + +static void nvg__setDevicePixelRatio(NVGcontext * ctx, float ratio) +{ + ctx->tessTol = NVG_TESS_TOL_FACTOR / ratio; + ctx->distTol = 0.01f / ratio; + 
ctx->fringeWidth = 1.0f / ratio; + ctx->devicePxRatio = ratio; +} + +static NVGcompositeOperationState nvg__compositeOperationState(int op) +{ + int sfactor, dfactor; + + if(op == NVG_SOURCE_OVER) { + sfactor = NVG_ONE; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else if(op == NVG_SOURCE_IN) { + sfactor = NVG_DST_ALPHA; + dfactor = NVG_ZERO; + } + else if(op == NVG_SOURCE_OUT) { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_ZERO; + } + else if(op == NVG_ATOP) { + sfactor = NVG_DST_ALPHA; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else if(op == NVG_DESTINATION_OVER) { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_ONE; + } + else if(op == NVG_DESTINATION_IN) { + sfactor = NVG_ZERO; + dfactor = NVG_SRC_ALPHA; + } + else if(op == NVG_DESTINATION_OUT) { + sfactor = NVG_ZERO; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else if(op == NVG_DESTINATION_ATOP) { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_SRC_ALPHA; + } + else if(op == NVG_LIGHTER) { + sfactor = NVG_ONE; + dfactor = NVG_ONE; + } + else if(op == NVG_COPY) { + sfactor = NVG_ONE; + dfactor = NVG_ZERO; + } + else if(op == NVG_XOR) { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else { + sfactor = NVG_ONE; + dfactor = NVG_ZERO; + } + + NVGcompositeOperationState state; + state.srcRGB = sfactor; + state.dstRGB = dfactor; + state.srcAlpha = sfactor; + state.dstAlpha = dfactor; + return state; +} + +static NVGstate * nvg__getState(NVGcontext * ctx) +{ + return &ctx->states[ctx->nstates - 1]; +} + +NVGcontext * nvgCreateInternal(NVGparams * params) +{ + NVGcontext * ctx = (NVGcontext *)lv_malloc(sizeof(NVGcontext)); + int i; + if(ctx == NULL) goto error; + lv_memzero(ctx, sizeof(NVGcontext)); + + ctx->params = *params; + for(i = 0; i < NVG_MAX_FONTIMAGES; i++) + ctx->fontImages[i] = 0; + + ctx->commands = (float *)lv_malloc(sizeof(float) * NVG_INIT_COMMANDS_SIZE); + if(!ctx->commands) goto error; + ctx->ncommands = 0; + ctx->ccommands = NVG_INIT_COMMANDS_SIZE; + + 
ctx->cache = nvg__allocPathCache(); + if(ctx->cache == NULL) goto error; + + nvgSave(ctx); + nvgReset(ctx); + + nvg__setDevicePixelRatio(ctx, 1.0f); + + if(ctx->params.renderCreate(ctx->params.userPtr) == 0) goto error; + + return ctx; + +error: + nvgDeleteInternal(ctx); + return 0; +} + +NVGparams * nvgInternalParams(NVGcontext * ctx) +{ + return &ctx->params; +} + +void nvgDeleteInternal(NVGcontext * ctx) +{ + int i; + if(ctx == NULL) return; + if(ctx->commands != NULL) lv_free(ctx->commands); + if(ctx->cache != NULL) nvg__deletePathCache(ctx->cache); + + for(i = 0; i < NVG_MAX_FONTIMAGES; i++) { + if(ctx->fontImages[i] != 0) { + nvgDeleteImage(ctx, ctx->fontImages[i]); + ctx->fontImages[i] = 0; + } + } + + if(ctx->params.renderDelete != NULL) + ctx->params.renderDelete(ctx->params.userPtr); + + lv_free(ctx); +} + +void nvgBeginFrame(NVGcontext * ctx, float windowWidth, float windowHeight, float devicePixelRatio) +{ + /* printf("Tris: draws:%d fill:%d stroke:%d text:%d TOT:%d\n", + ctx->drawCallCount, ctx->fillTriCount, ctx->strokeTriCount, ctx->textTriCount, + ctx->fillTriCount+ctx->strokeTriCount+ctx->textTriCount);*/ + + ctx->nstates = 0; + nvgSave(ctx); + nvgReset(ctx); + + nvg__setDevicePixelRatio(ctx, devicePixelRatio); + + ctx->params.renderViewport(ctx->params.userPtr, windowWidth, windowHeight, devicePixelRatio); + + ctx->drawCallCount = 0; + ctx->fillTriCount = 0; + ctx->strokeTriCount = 0; + ctx->textTriCount = 0; +} + +void nvgCancelFrame(NVGcontext * ctx) +{ + ctx->params.renderCancel(ctx->params.userPtr); +} + +void nvgEndFrame(NVGcontext * ctx) +{ + ctx->params.renderFlush(ctx->params.userPtr); + if(ctx->fontImageIdx != 0) { + int fontImage = ctx->fontImages[ctx->fontImageIdx]; + ctx->fontImages[ctx->fontImageIdx] = 0; + int i, j, iw, ih; + // delete images that smaller than current one + if(fontImage == 0) + return; + nvgImageSize(ctx, fontImage, &iw, &ih); + for(i = j = 0; i < ctx->fontImageIdx; i++) { + if(ctx->fontImages[i] != 0) { + int nw, 
nh; + int image = ctx->fontImages[i]; + ctx->fontImages[i] = 0; + nvgImageSize(ctx, image, &nw, &nh); + if(nw < iw || nh < ih) + nvgDeleteImage(ctx, image); + else + ctx->fontImages[j++] = image; + } + } + // make current font image to first + ctx->fontImages[j] = ctx->fontImages[0]; + ctx->fontImages[0] = fontImage; + ctx->fontImageIdx = 0; + } +} + +NVGcolor nvgRGB(unsigned char r, unsigned char g, unsigned char b) +{ + return nvgRGBA(r, g, b, 255); +} + +NVGcolor nvgRGBf(float r, float g, float b) +{ + return nvgRGBAf(r, g, b, 1.0f); +} + +NVGcolor nvgRGBA(unsigned char r, unsigned char g, unsigned char b, unsigned char a) +{ + NVGcolor color; + // Use longer initialization to suppress warning. + color.ch.r = r / 255.0f; + color.ch.g = g / 255.0f; + color.ch.b = b / 255.0f; + color.ch.a = a / 255.0f; + return color; +} + +NVGcolor nvgRGBAf(float r, float g, float b, float a) +{ + NVGcolor color; + // Use longer initialization to suppress warning. + color.ch.r = r; + color.ch.g = g; + color.ch.b = b; + color.ch.a = a; + return color; +} + +NVGcolor nvgTransRGBA(NVGcolor c, unsigned char a) +{ + c.ch.a = a / 255.0f; + return c; +} + +NVGcolor nvgTransRGBAf(NVGcolor c, float a) +{ + c.ch.a = a; + return c; +} + +NVGcolor nvgLerpRGBA(NVGcolor c0, NVGcolor c1, float u) +{ + int i; + float oneminu; + NVGcolor cint = { 0 }; + + u = nvg__clampf(u, 0.0f, 1.0f); + oneminu = 1.0f - u; + for(i = 0; i < 4; i++) { + cint.rgba[i] = c0.rgba[i] * oneminu + c1.rgba[i] * u; + } + + return cint; +} + +NVGcolor nvgHSL(float h, float s, float l) +{ + return nvgHSLA(h, s, l, 255); +} + +static float nvg__hue(float h, float m1, float m2) +{ + if(h < 0) h += 1; + if(h > 1) h -= 1; + if(h < 1.0f / 6.0f) + return m1 + (m2 - m1) * h * 6.0f; + else if(h < 3.0f / 6.0f) + return m2; + else if(h < 4.0f / 6.0f) + return m1 + (m2 - m1) * (2.0f / 3.0f - h) * 6.0f; + return m1; +} + +NVGcolor nvgHSLA(float h, float s, float l, unsigned char a) +{ + float m1, m2; + NVGcolor col; + h = nvg__modf(h, 
1.0f); + if(h < 0.0f) h += 1.0f; + s = nvg__clampf(s, 0.0f, 1.0f); + l = nvg__clampf(l, 0.0f, 1.0f); + m2 = l <= 0.5f ? (l * (1 + s)) : (l + s - l * s); + m1 = 2 * l - m2; + col.ch.r = nvg__clampf(nvg__hue(h + 1.0f / 3.0f, m1, m2), 0.0f, 1.0f); + col.ch.g = nvg__clampf(nvg__hue(h, m1, m2), 0.0f, 1.0f); + col.ch.b = nvg__clampf(nvg__hue(h - 1.0f / 3.0f, m1, m2), 0.0f, 1.0f); + col.ch.a = a / 255.0f; + return col; +} + +void nvgTransformIdentity(float * t) +{ + t[0] = 1.0f; + t[1] = 0.0f; + t[2] = 0.0f; + t[3] = 1.0f; + t[4] = 0.0f; + t[5] = 0.0f; +} + +void nvgTransformTranslate(float * t, float tx, float ty) +{ + t[0] = 1.0f; + t[1] = 0.0f; + t[2] = 0.0f; + t[3] = 1.0f; + t[4] = tx; + t[5] = ty; +} + +void nvgTransformScale(float * t, float sx, float sy) +{ + t[0] = sx; + t[1] = 0.0f; + t[2] = 0.0f; + t[3] = sy; + t[4] = 0.0f; + t[5] = 0.0f; +} + +void nvgTransformRotate(float * t, float a) +{ + float cs = nvg__cosf(a), sn = nvg__sinf(a); + t[0] = cs; + t[1] = sn; + t[2] = -sn; + t[3] = cs; + t[4] = 0.0f; + t[5] = 0.0f; +} + +void nvgTransformSkewX(float * t, float a) +{ + t[0] = 1.0f; + t[1] = 0.0f; + t[2] = nvg__tanf(a); + t[3] = 1.0f; + t[4] = 0.0f; + t[5] = 0.0f; +} + +void nvgTransformSkewY(float * t, float a) +{ + t[0] = 1.0f; + t[1] = nvg__tanf(a); + t[2] = 0.0f; + t[3] = 1.0f; + t[4] = 0.0f; + t[5] = 0.0f; +} + +void nvgTransformMultiply(float * t, const float * s) +{ + float t0 = t[0] * s[0] + t[1] * s[2]; + float t2 = t[2] * s[0] + t[3] * s[2]; + float t4 = t[4] * s[0] + t[5] * s[2] + s[4]; + t[1] = t[0] * s[1] + t[1] * s[3]; + t[3] = t[2] * s[1] + t[3] * s[3]; + t[5] = t[4] * s[1] + t[5] * s[3] + s[5]; + t[0] = t0; + t[2] = t2; + t[4] = t4; +} + +void nvgTransformPremultiply(float * t, const float * s) +{ + float s2[6]; + lv_memcpy(s2, s, sizeof(float) * 6); + nvgTransformMultiply(s2, t); + lv_memcpy(t, s2, sizeof(float) * 6); +} + +int nvgTransformInverse(float * inv, const float * t) +{ + double invdet, det = (double)t[0] * t[3] - (double)t[2] * t[1]; 
+ if(det > -1e-6 && det < 1e-6) { + nvgTransformIdentity(inv); + return 0; + } + invdet = 1.0 / det; + inv[0] = (float)(t[3] * invdet); + inv[2] = (float)(-t[2] * invdet); + inv[4] = (float)(((double)t[2] * t[5] - (double)t[3] * t[4]) * invdet); + inv[1] = (float)(-t[1] * invdet); + inv[3] = (float)(t[0] * invdet); + inv[5] = (float)(((double)t[1] * t[4] - (double)t[0] * t[5]) * invdet); + return 1; +} + +void nvgTransformPoint(float * dx, float * dy, const float * t, float sx, float sy) +{ + *dx = sx * t[0] + sy * t[2] + t[4]; + *dy = sx * t[1] + sy * t[3] + t[5]; +} + +float nvgDegToRad(float deg) +{ + return deg / 180.0f * NVG_PI; +} + +float nvgRadToDeg(float rad) +{ + return rad / NVG_PI * 180.0f; +} + +static void nvg__setPaintColor(NVGpaint * p, NVGcolor color) +{ + lv_memzero(p, sizeof(*p)); + nvgTransformIdentity(p->xform); + p->radius = 0.0f; + p->feather = 1.0f; + p->innerColor = color; + p->outerColor = color; +} + + +// State handling +void nvgSave(NVGcontext * ctx) +{ + if(ctx->nstates >= NVG_MAX_STATES) + return; + if(ctx->nstates > 0) + lv_memcpy(&ctx->states[ctx->nstates], &ctx->states[ctx->nstates - 1], sizeof(NVGstate)); + ctx->nstates++; +} + +void nvgRestore(NVGcontext * ctx) +{ + if(ctx->nstates <= 1) + return; + ctx->nstates--; +} + +void nvgReset(NVGcontext * ctx) +{ + NVGstate * state = nvg__getState(ctx); + lv_memzero(state, sizeof(*state)); + + nvg__setPaintColor(&state->fill, nvgRGBA(255, 255, 255, 255)); + nvg__setPaintColor(&state->stroke, nvgRGBA(0, 0, 0, 255)); + state->compositeOperation = nvg__compositeOperationState(NVG_SOURCE_OVER); + state->shapeAntiAlias = 1; + state->strokeWidth = 1.0f; + state->miterLimit = 10.0f; + state->lineCap = NVG_BUTT; + state->lineJoin = NVG_MITER; + state->alpha = 1.0f; + nvgTransformIdentity(state->xform); + + state->scissor.extent[0] = -1.0f; + state->scissor.extent[1] = -1.0f; + + state->fontSize = 16.0f; + state->letterSpacing = 0.0f; + state->lineHeight = 1.0f; + state->fontBlur = 0.0f; + 
state->textAlign = NVG_ALIGN_LEFT | NVG_ALIGN_BASELINE; + state->fontId = 0; +} + +// State setting +void nvgShapeAntiAlias(NVGcontext * ctx, int enabled) +{ + NVGstate * state = nvg__getState(ctx); + state->shapeAntiAlias = enabled; +} + +void nvgStrokeWidth(NVGcontext * ctx, float width) +{ + NVGstate * state = nvg__getState(ctx); + state->strokeWidth = width; +} + +void nvgMiterLimit(NVGcontext * ctx, float limit) +{ + NVGstate * state = nvg__getState(ctx); + state->miterLimit = limit; +} + +void nvgLineCap(NVGcontext * ctx, int cap) +{ + NVGstate * state = nvg__getState(ctx); + state->lineCap = cap; +} + +void nvgLineJoin(NVGcontext * ctx, int join) +{ + NVGstate * state = nvg__getState(ctx); + state->lineJoin = join; +} + +void nvgGlobalAlpha(NVGcontext * ctx, float alpha) +{ + NVGstate * state = nvg__getState(ctx); + state->alpha = alpha; +} + +void nvgTransform(NVGcontext * ctx, float a, float b, float c, float d, float e, float f) +{ + NVGstate * state = nvg__getState(ctx); + float t[6] = { a, b, c, d, e, f }; + nvgTransformPremultiply(state->xform, t); +} + +void nvgResetTransform(NVGcontext * ctx) +{ + NVGstate * state = nvg__getState(ctx); + nvgTransformIdentity(state->xform); +} + +void nvgTranslate(NVGcontext * ctx, float x, float y) +{ + NVGstate * state = nvg__getState(ctx); + float t[6]; + nvgTransformTranslate(t, x, y); + nvgTransformPremultiply(state->xform, t); +} + +void nvgRotate(NVGcontext * ctx, float angle) +{ + NVGstate * state = nvg__getState(ctx); + float t[6]; + nvgTransformRotate(t, angle); + nvgTransformPremultiply(state->xform, t); +} + +void nvgSkewX(NVGcontext * ctx, float angle) +{ + NVGstate * state = nvg__getState(ctx); + float t[6]; + nvgTransformSkewX(t, angle); + nvgTransformPremultiply(state->xform, t); +} + +void nvgSkewY(NVGcontext * ctx, float angle) +{ + NVGstate * state = nvg__getState(ctx); + float t[6]; + nvgTransformSkewY(t, angle); + nvgTransformPremultiply(state->xform, t); +} + +void nvgScale(NVGcontext * ctx, 
float x, float y) +{ + NVGstate * state = nvg__getState(ctx); + float t[6]; + nvgTransformScale(t, x, y); + nvgTransformPremultiply(state->xform, t); +} + +void nvgCurrentTransform(NVGcontext * ctx, float * xform) +{ + NVGstate * state = nvg__getState(ctx); + if(xform == NULL) return; + lv_memcpy(xform, state->xform, sizeof(float) * 6); +} + +void nvgStrokeColor(NVGcontext * ctx, NVGcolor color) +{ + NVGstate * state = nvg__getState(ctx); + nvg__setPaintColor(&state->stroke, color); +} + +void nvgStrokePaint(NVGcontext * ctx, NVGpaint paint) +{ + NVGstate * state = nvg__getState(ctx); + state->stroke = paint; + nvgTransformMultiply(state->stroke.xform, state->xform); +} + +void nvgFillColor(NVGcontext * ctx, NVGcolor color) +{ + NVGstate * state = nvg__getState(ctx); + nvg__setPaintColor(&state->fill, color); +} + +void nvgFillPaint(NVGcontext * ctx, NVGpaint paint) +{ + NVGstate * state = nvg__getState(ctx); + state->fill = paint; + nvgTransformMultiply(state->fill.xform, state->xform); +} + +int nvgCreateImage(NVGcontext * ctx, int w, int h, int imageFlags, int format, const unsigned char * data) +{ + return ctx->params.renderCreateTexture(ctx->params.userPtr, format, w, h, imageFlags, data); +} + +void nvgUpdateImage(NVGcontext * ctx, int image, const unsigned char * data) +{ + int w, h; + ctx->params.renderGetTextureSize(ctx->params.userPtr, image, &w, &h); + ctx->params.renderUpdateTexture(ctx->params.userPtr, image, 0, 0, w, h, data); +} + +void nvgImageSize(NVGcontext * ctx, int image, int * w, int * h) +{ + ctx->params.renderGetTextureSize(ctx->params.userPtr, image, w, h); +} + +void nvgDeleteImage(NVGcontext * ctx, int image) +{ + ctx->params.renderDeleteTexture(ctx->params.userPtr, image); +} + +NVGpaint nvgLinearGradient(NVGcontext * ctx, + float sx, float sy, float ex, float ey, + NVGcolor icol, NVGcolor ocol) +{ + NVGpaint p; + float dx, dy, d; + const float large = 1e5; + NVG_NOTUSED(ctx); + lv_memzero(&p, sizeof(p)); + + // Calculate transform 
aligned to the line + dx = ex - sx; + dy = ey - sy; + d = sqrtf(dx * dx + dy * dy); + if(d > 0.0001f) { + dx /= d; + dy /= d; + } + else { + dx = 0; + dy = 1; + } + + p.xform[0] = dy; + p.xform[1] = -dx; + p.xform[2] = dx; + p.xform[3] = dy; + p.xform[4] = sx - dx * large; + p.xform[5] = sy - dy * large; + + p.extent[0] = large; + p.extent[1] = large + d * 0.5f; + + p.radius = 0.0f; + + p.feather = nvg__maxf(1.0f, d); + + p.innerColor = icol; + p.outerColor = ocol; + + return p; +} + +NVGpaint nvgRadialGradient(NVGcontext * ctx, + float cx, float cy, float inr, float outr, + NVGcolor icol, NVGcolor ocol) +{ + NVGpaint p; + float r = (inr + outr) * 0.5f; + float f = (outr - inr); + NVG_NOTUSED(ctx); + lv_memzero(&p, sizeof(p)); + + nvgTransformIdentity(p.xform); + p.xform[4] = cx; + p.xform[5] = cy; + + p.extent[0] = r; + p.extent[1] = r; + + p.radius = r; + + p.feather = nvg__maxf(1.0f, f); + + p.innerColor = icol; + p.outerColor = ocol; + + return p; +} + +NVGpaint nvgBoxGradient(NVGcontext * ctx, + float x, float y, float w, float h, float r, float f, + NVGcolor icol, NVGcolor ocol) +{ + NVGpaint p; + NVG_NOTUSED(ctx); + lv_memzero(&p, sizeof(p)); + + nvgTransformIdentity(p.xform); + p.xform[4] = x + w * 0.5f; + p.xform[5] = y + h * 0.5f; + + p.extent[0] = w * 0.5f; + p.extent[1] = h * 0.5f; + + p.radius = r; + + p.feather = nvg__maxf(1.0f, f); + + p.innerColor = icol; + p.outerColor = ocol; + + return p; +} + + +NVGpaint nvgImagePattern(NVGcontext * ctx, + float cx, float cy, float w, float h, float angle, + int image, float alpha) +{ + NVGpaint p; + NVG_NOTUSED(ctx); + lv_memzero(&p, sizeof(p)); + + nvgTransformRotate(p.xform, angle); + p.xform[4] = cx; + p.xform[5] = cy; + + p.extent[0] = w; + p.extent[1] = h; + + p.image = image; + + p.innerColor = p.outerColor = nvgRGBAf(1, 1, 1, alpha); + + return p; +} + +// Scissoring +void nvgScissor(NVGcontext * ctx, float x, float y, float w, float h) +{ + NVGstate * state = nvg__getState(ctx); + + w = nvg__maxf(0.0f, 
w); + h = nvg__maxf(0.0f, h); + + nvgTransformIdentity(state->scissor.xform); + state->scissor.xform[4] = x + w * 0.5f; + state->scissor.xform[5] = y + h * 0.5f; + nvgTransformMultiply(state->scissor.xform, state->xform); + + state->scissor.extent[0] = w * 0.5f; + state->scissor.extent[1] = h * 0.5f; +} + +static void nvg__isectRects(float * dst, + float ax, float ay, float aw, float ah, + float bx, float by, float bw, float bh) +{ + float minx = nvg__maxf(ax, bx); + float miny = nvg__maxf(ay, by); + float maxx = nvg__minf(ax + aw, bx + bw); + float maxy = nvg__minf(ay + ah, by + bh); + dst[0] = minx; + dst[1] = miny; + dst[2] = nvg__maxf(0.0f, maxx - minx); + dst[3] = nvg__maxf(0.0f, maxy - miny); +} + +void nvgIntersectScissor(NVGcontext * ctx, float x, float y, float w, float h) +{ + NVGstate * state = nvg__getState(ctx); + float pxform[6], invxorm[6]; + float rect[4]; + float ex, ey, tex, tey; + + // If no previous scissor has been set, set the scissor as current scissor. + if(state->scissor.extent[0] < 0) { + nvgScissor(ctx, x, y, w, h); + return; + } + + // Transform the current scissor rect into current transform space. + // If there is difference in rotation, this will be approximation. + lv_memcpy(pxform, state->scissor.xform, sizeof(float) * 6); + ex = state->scissor.extent[0]; + ey = state->scissor.extent[1]; + nvgTransformInverse(invxorm, state->xform); + nvgTransformMultiply(pxform, invxorm); + tex = ex * nvg__absf(pxform[0]) + ey * nvg__absf(pxform[2]); + tey = ex * nvg__absf(pxform[1]) + ey * nvg__absf(pxform[3]); + + // Intersect rects. + nvg__isectRects(rect, pxform[4] - tex, pxform[5] - tey, tex * 2, tey * 2, x, y, w, h); + + nvgScissor(ctx, rect[0], rect[1], rect[2], rect[3]); +} + +void nvgResetScissor(NVGcontext * ctx) +{ + NVGstate * state = nvg__getState(ctx); + lv_memzero(state->scissor.xform, sizeof(state->scissor.xform)); + state->scissor.extent[0] = -1.0f; + state->scissor.extent[1] = -1.0f; +} + +// Global composite operation. 
+void nvgGlobalCompositeOperation(NVGcontext * ctx, int op) +{ + NVGstate * state = nvg__getState(ctx); + state->compositeOperation = nvg__compositeOperationState(op); +} + +void nvgGlobalCompositeBlendFunc(NVGcontext * ctx, int sfactor, int dfactor) +{ + nvgGlobalCompositeBlendFuncSeparate(ctx, sfactor, dfactor, sfactor, dfactor); +} + +void nvgGlobalCompositeBlendFuncSeparate(NVGcontext * ctx, int srcRGB, int dstRGB, int srcAlpha, int dstAlpha) +{ + NVGcompositeOperationState op; + op.srcRGB = srcRGB; + op.dstRGB = dstRGB; + op.srcAlpha = srcAlpha; + op.dstAlpha = dstAlpha; + + NVGstate * state = nvg__getState(ctx); + state->compositeOperation = op; +} + +static int nvg__ptEquals(float x1, float y1, float x2, float y2, float tol) +{ + float dx = x2 - x1; + float dy = y2 - y1; + return dx * dx + dy * dy < tol * tol; +} + +static float nvg__distPtSeg(float x, float y, float px, float py, float qx, float qy) +{ + float pqx, pqy, dx, dy, d, t; + pqx = qx - px; + pqy = qy - py; + dx = x - px; + dy = y - py; + d = pqx * pqx + pqy * pqy; + t = pqx * dx + pqy * dy; + if(d > 0) t /= d; + if(t < 0) t = 0; + else if(t > 1) t = 1; + dx = px + t * pqx - x; + dy = py + t * pqy - y; + return dx * dx + dy * dy; +} + +static void nvg__appendCommands(NVGcontext * ctx, float * vals, int nvals) +{ + NVGstate * state = nvg__getState(ctx); + int i; + + if(ctx->ncommands + nvals > ctx->ccommands) { + float * commands; + int ccommands = ctx->ncommands + nvals + ctx->ccommands / 2; + commands = (float *)lv_realloc(ctx->commands, sizeof(float) * ccommands); + if(commands == NULL) return; + ctx->commands = commands; + ctx->ccommands = ccommands; + } + + if((int)vals[0] != NVG_CLOSE && (int)vals[0] != NVG_WINDING) { + ctx->commandx = vals[nvals - 2]; + ctx->commandy = vals[nvals - 1]; + } + + // transform commands + i = 0; + while(i < nvals) { + int cmd = (int)vals[i]; + switch(cmd) { + case NVG_MOVETO: + nvgTransformPoint(&vals[i + 1], &vals[i + 2], state->xform, vals[i + 1], vals[i + 2]); 
+ i += 3; + break; + case NVG_LINETO: + nvgTransformPoint(&vals[i + 1], &vals[i + 2], state->xform, vals[i + 1], vals[i + 2]); + i += 3; + break; + case NVG_BEZIERTO: + nvgTransformPoint(&vals[i + 1], &vals[i + 2], state->xform, vals[i + 1], vals[i + 2]); + nvgTransformPoint(&vals[i + 3], &vals[i + 4], state->xform, vals[i + 3], vals[i + 4]); + nvgTransformPoint(&vals[i + 5], &vals[i + 6], state->xform, vals[i + 5], vals[i + 6]); + i += 7; + break; + case NVG_CLOSE: + i++; + break; + case NVG_WINDING: + i += 2; + break; + default: + i++; + } + } + + lv_memcpy(&ctx->commands[ctx->ncommands], vals, nvals * sizeof(float)); + + ctx->ncommands += nvals; +} + + +static void nvg__clearPathCache(NVGcontext * ctx) +{ + ctx->cache->npoints = 0; + ctx->cache->npaths = 0; +} + +static NVGpath * nvg__lastPath(NVGcontext * ctx) +{ + if(ctx->cache->npaths > 0) + return &ctx->cache->paths[ctx->cache->npaths - 1]; + return NULL; +} + +static void nvg__addPath(NVGcontext * ctx) +{ + NVGpath * path; + if(ctx->cache->npaths + 1 > ctx->cache->cpaths) { + NVGpath * paths; + int cpaths = ctx->cache->npaths + 1 + ctx->cache->cpaths / 2; + paths = (NVGpath *)lv_realloc(ctx->cache->paths, sizeof(NVGpath) * cpaths); + if(paths == NULL) return; + ctx->cache->paths = paths; + ctx->cache->cpaths = cpaths; + } + path = &ctx->cache->paths[ctx->cache->npaths]; + lv_memzero(path, sizeof(*path)); + path->first = ctx->cache->npoints; + path->winding = NVG_CCW; + + ctx->cache->npaths++; +} + +static NVGpoint * nvg__lastPoint(NVGcontext * ctx) +{ + if(ctx->cache->npoints > 0) + return &ctx->cache->points[ctx->cache->npoints - 1]; + return NULL; +} + +static void nvg__addPoint(NVGcontext * ctx, float x, float y, int flags) +{ + NVGpath * path = nvg__lastPath(ctx); + NVGpoint * pt; + if(path == NULL) return; + + if(path->count > 0 && ctx->cache->npoints > 0) { + pt = nvg__lastPoint(ctx); + if(nvg__ptEquals(pt->x, pt->y, x, y, ctx->distTol)) { + pt->flags |= flags; + return; + } + } + + 
if(ctx->cache->npoints + 1 > ctx->cache->cpoints) { + NVGpoint * points; + int cpoints = ctx->cache->npoints + 1 + ctx->cache->cpoints / 2; + points = (NVGpoint *)lv_realloc(ctx->cache->points, sizeof(NVGpoint) * cpoints); + if(points == NULL) return; + ctx->cache->points = points; + ctx->cache->cpoints = cpoints; + } + + pt = &ctx->cache->points[ctx->cache->npoints]; + lv_memzero(pt, sizeof(*pt)); + pt->x = x; + pt->y = y; + pt->flags = (unsigned char)flags; + + ctx->cache->npoints++; + path->count++; +} + +static void nvg__closePath(NVGcontext * ctx) +{ + NVGpath * path = nvg__lastPath(ctx); + if(path == NULL) return; + path->closed = 1; +} + +static void nvg__pathWinding(NVGcontext * ctx, int winding) +{ + NVGpath * path = nvg__lastPath(ctx); + if(path == NULL) return; + path->winding = winding; +} + +static float nvg__getAverageScale(float * t) +{ + float sx = sqrtf(t[0] * t[0] + t[2] * t[2]); + float sy = sqrtf(t[1] * t[1] + t[3] * t[3]); + return (sx + sy) * 0.5f; +} + +static NVGvertex * nvg__allocTempVerts(NVGcontext * ctx, int nverts) +{ + if(nverts > ctx->cache->cverts) { + NVGvertex * verts; + int cverts = (nverts + 0xff) & ~0xff; // Round up to prevent allocations when things change just slightly. 
+ verts = (NVGvertex *)lv_realloc(ctx->cache->verts, sizeof(NVGvertex) * cverts); + if(verts == NULL) return NULL; + ctx->cache->verts = verts; + ctx->cache->cverts = cverts; + } + + return ctx->cache->verts; +} + +static float nvg__triarea2(float ax, float ay, float bx, float by, float cx, float cy) +{ + float abx = bx - ax; + float aby = by - ay; + float acx = cx - ax; + float acy = cy - ay; + return acx * aby - abx * acy; +} + +static float nvg__polyArea(NVGpoint * pts, int npts) +{ + int i; + float area = 0; + for(i = 2; i < npts; i++) { + NVGpoint * a = &pts[0]; + NVGpoint * b = &pts[i - 1]; + NVGpoint * c = &pts[i]; + area += nvg__triarea2(a->x, a->y, b->x, b->y, c->x, c->y); + } + return area * 0.5f; +} + +static void nvg__polyReverse(NVGpoint * pts, int npts) +{ + NVGpoint tmp; + int i = 0, j = npts - 1; + while(i < j) { + tmp = pts[i]; + pts[i] = pts[j]; + pts[j] = tmp; + i++; + j--; + } +} + + +static void nvg__vset(NVGvertex * vtx, float x, float y, float u, float v) +{ + vtx->x = x; + vtx->y = y; + vtx->u = u; + vtx->v = v; +} + +static void nvg__tesselateBezier(NVGcontext * ctx, + float x1, float y1, float x2, float y2, + float x3, float y3, float x4, float y4, + int level, int type) +{ + float x12, y12, x23, y23, x34, y34, x123, y123, x234, y234, x1234, y1234; + float dx, dy, d2, d3; + + if(level > 10) return; + + x12 = (x1 + x2) * 0.5f; + y12 = (y1 + y2) * 0.5f; + x23 = (x2 + x3) * 0.5f; + y23 = (y2 + y3) * 0.5f; + x34 = (x3 + x4) * 0.5f; + y34 = (y3 + y4) * 0.5f; + x123 = (x12 + x23) * 0.5f; + y123 = (y12 + y23) * 0.5f; + + dx = x4 - x1; + dy = y4 - y1; + d2 = nvg__absf(((x2 - x4) * dy - (y2 - y4) * dx)); + d3 = nvg__absf(((x3 - x4) * dy - (y3 - y4) * dx)); + + if((d2 + d3) * (d2 + d3) < ctx->tessTol * (dx * dx + dy * dy)) { + nvg__addPoint(ctx, x4, y4, type); + return; + } + + /* if (nvg__absf(x1+x3-x2-x2) + nvg__absf(y1+y3-y2-y2) + nvg__absf(x2+x4-x3-x3) + nvg__absf(y2+y4-y3-y3) < ctx->tessTol) { + nvg__addPoint(ctx, x4, y4, type); + return; + }*/ 
+ + x234 = (x23 + x34) * 0.5f; + y234 = (y23 + y34) * 0.5f; + x1234 = (x123 + x234) * 0.5f; + y1234 = (y123 + y234) * 0.5f; + + nvg__tesselateBezier(ctx, x1, y1, x12, y12, x123, y123, x1234, y1234, level + 1, 0); + nvg__tesselateBezier(ctx, x1234, y1234, x234, y234, x34, y34, x4, y4, level + 1, type); +} + +static void nvg__flattenPaths(NVGcontext * ctx) +{ + NVGpathCache * cache = ctx->cache; + // NVGstate* state = nvg__getState(ctx); + NVGpoint * last; + NVGpoint * p0; + NVGpoint * p1; + NVGpoint * pts; + NVGpath * path; + int i, j; + float * cp1; + float * cp2; + float * p; + float area; + + if(cache->npaths > 0) + return; + + // Flatten + i = 0; + while(i < ctx->ncommands) { + int cmd = (int)ctx->commands[i]; + switch(cmd) { + case NVG_MOVETO: + nvg__addPath(ctx); + p = &ctx->commands[i + 1]; + nvg__addPoint(ctx, p[0], p[1], NVG_PT_CORNER); + i += 3; + break; + case NVG_LINETO: + p = &ctx->commands[i + 1]; + nvg__addPoint(ctx, p[0], p[1], NVG_PT_CORNER); + i += 3; + break; + case NVG_BEZIERTO: + last = nvg__lastPoint(ctx); + if(last != NULL) { + cp1 = &ctx->commands[i + 1]; + cp2 = &ctx->commands[i + 3]; + p = &ctx->commands[i + 5]; + nvg__tesselateBezier(ctx, last->x, last->y, cp1[0], cp1[1], cp2[0], cp2[1], p[0], p[1], 0, NVG_PT_CORNER); + } + i += 7; + break; + case NVG_CLOSE: + nvg__closePath(ctx); + i++; + break; + case NVG_WINDING: + nvg__pathWinding(ctx, (int)ctx->commands[i + 1]); + i += 2; + break; + default: + i++; + } + } + + cache->bounds[0] = cache->bounds[1] = 1e6f; + cache->bounds[2] = cache->bounds[3] = -1e6f; + + // Calculate the direction and length of line segments. + for(j = 0; j < cache->npaths; j++) { + path = &cache->paths[j]; + pts = &cache->points[path->first]; + + // If the first and last points are the same, remove the last, mark as closed path. 
+ p0 = &pts[path->count - 1]; + p1 = &pts[0]; + if(nvg__ptEquals(p0->x, p0->y, p1->x, p1->y, ctx->distTol)) { + path->count--; + p0 = &pts[path->count - 1]; + path->closed = 1; + } + + // Enforce winding. + if(path->count > 2) { + area = nvg__polyArea(pts, path->count); + if(path->winding == NVG_CCW && area < 0.0f) + nvg__polyReverse(pts, path->count); + if(path->winding == NVG_CW && area > 0.0f) + nvg__polyReverse(pts, path->count); + } + + for(i = 0; i < path->count; i++) { + // Calculate segment direction and length + p0->dx = p1->x - p0->x; + p0->dy = p1->y - p0->y; + p0->len = nvg__normalize(&p0->dx, &p0->dy); + // Update bounds + cache->bounds[0] = nvg__minf(cache->bounds[0], p0->x); + cache->bounds[1] = nvg__minf(cache->bounds[1], p0->y); + cache->bounds[2] = nvg__maxf(cache->bounds[2], p0->x); + cache->bounds[3] = nvg__maxf(cache->bounds[3], p0->y); + // Advance + p0 = p1++; + } + } +} + +static int nvg__curveDivs(float r, float arc, float tol) +{ + float da = acosf(r / (r + tol)) * 2.0f; + return nvg__maxi(2, (int)ceilf(arc / da)); +} + +static void nvg__chooseBevel(int bevel, NVGpoint * p0, NVGpoint * p1, float w, + float * x0, float * y0, float * x1, float * y1) +{ + if(bevel) { + *x0 = p1->x + p0->dy * w; + *y0 = p1->y - p0->dx * w; + *x1 = p1->x + p1->dy * w; + *y1 = p1->y - p1->dx * w; + } + else { + *x0 = p1->x + p1->dmx * w; + *y0 = p1->y + p1->dmy * w; + *x1 = p1->x + p1->dmx * w; + *y1 = p1->y + p1->dmy * w; + } +} + +static NVGvertex * nvg__roundJoin(NVGvertex * dst, NVGpoint * p0, NVGpoint * p1, + float lw, float rw, float lu, float ru, int ncap, + float fringe) +{ + int i, n; + float dlx0 = p0->dy; + float dly0 = -p0->dx; + float dlx1 = p1->dy; + float dly1 = -p1->dx; + NVG_NOTUSED(fringe); + + if(p1->flags & NVG_PT_LEFT) { + float lx0, ly0, lx1, ly1, a0, a1; + nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, lw, &lx0, &ly0, &lx1, &ly1); + a0 = atan2f(-dly0, -dlx0); + a1 = atan2f(-dly1, -dlx1); + if(a1 > a0) a1 -= NVG_PI * 2; + + 
        /*
         * (Tail of nvg__roundJoin: emits the round-join vertex fan at joint p1.
         *  The left-turn branch finishes here; the else branch below mirrors it
         *  for right turns. Head of this function is above this chunk.)
         */
        nvg__vset(dst, lx0, ly0, lu, 1);
        dst++;
        nvg__vset(dst, p1->x - dlx0 * rw, p1->y - dly0 * rw, ru, 1);
        dst++;

        /* Fan out the arc between the two edge normals; at least 2 segments. */
        n = nvg__clampi((int)ceilf(((a0 - a1) / NVG_PI) * ncap), 2, ncap);
        for(i = 0; i < n; i++) {
            float u = i / (float)(n - 1);
            float a = a0 + u * (a1 - a0);
            float rx = p1->x + cosf(a) * rw;
            float ry = p1->y + sinf(a) * rw;
            nvg__vset(dst, p1->x, p1->y, 0.5f, 1);
            dst++;
            nvg__vset(dst, rx, ry, ru, 1);
            dst++;
        }

        nvg__vset(dst, lx1, ly1, lu, 1);
        dst++;
        nvg__vset(dst, p1->x - dlx1 * rw, p1->y - dly1 * rw, ru, 1);
        dst++;

    }
    else {
        /* Right turn: the arc runs on the left side of the stroke. */
        float rx0, ry0, rx1, ry1, a0, a1;
        nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, -rw, &rx0, &ry0, &rx1, &ry1);
        a0 = atan2f(dly0, dlx0);
        a1 = atan2f(dly1, dlx1);
        if(a1 < a0) a1 += NVG_PI * 2;

        nvg__vset(dst, p1->x + dlx0 * rw, p1->y + dly0 * rw, lu, 1);
        dst++;
        nvg__vset(dst, rx0, ry0, ru, 1);
        dst++;

        n = nvg__clampi((int)ceilf(((a1 - a0) / NVG_PI) * ncap), 2, ncap);
        for(i = 0; i < n; i++) {
            float u = i / (float)(n - 1);
            float a = a0 + u * (a1 - a0);
            float lx = p1->x + cosf(a) * lw;
            float ly = p1->y + sinf(a) * lw;
            nvg__vset(dst, lx, ly, lu, 1);
            dst++;
            nvg__vset(dst, p1->x, p1->y, 0.5f, 1);
            dst++;
        }

        nvg__vset(dst, p1->x + dlx1 * rw, p1->y + dly1 * rw, lu, 1);
        dst++;
        nvg__vset(dst, rx1, ry1, ru, 1);
        dst++;

    }
    return dst;
}

/*
 * Emit bevel-join vertices (triangle strip) for the joint at p1.
 * lw/rw are the left/right extrusion widths, lu/ru the matching u texture
 * coordinates used for the antialias fringe. Writes into dst and returns
 * the advanced pointer; the caller guarantees enough space was reserved.
 */
static NVGvertex * nvg__bevelJoin(NVGvertex * dst, NVGpoint * p0, NVGpoint * p1,
                                  float lw, float rw, float lu, float ru, float fringe)
{
    float rx0, ry0, rx1, ry1;
    float lx0, ly0, lx1, ly1;
    float dlx0 = p0->dy;
    float dly0 = -p0->dx;
    float dlx1 = p1->dy;
    float dly1 = -p1->dx;
    NVG_NOTUSED(fringe);

    if(p1->flags & NVG_PT_LEFT) {
        nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, lw, &lx0, &ly0, &lx1, &ly1);

        nvg__vset(dst, lx0, ly0, lu, 1);
        dst++;
        nvg__vset(dst, p1->x - dlx0 * rw, p1->y - dly0 * rw, ru, 1);
        dst++;

        if(p1->flags & NVG_PT_BEVEL) {
            nvg__vset(dst, lx0, ly0, lu, 1);
            dst++;
            nvg__vset(dst, p1->x - dlx0 * rw, p1->y - dly0 * rw, ru, 1);
            dst++;

            nvg__vset(dst, lx1, ly1, lu, 1);
            dst++;
            nvg__vset(dst, p1->x - dlx1 * rw, p1->y - dly1 * rw, ru, 1);
            dst++;
        }
        else {
            /* Miter-style inner corner: collapse onto the miter point. */
            rx0 = p1->x - p1->dmx * rw;
            ry0 = p1->y - p1->dmy * rw;

            nvg__vset(dst, p1->x, p1->y, 0.5f, 1);
            dst++;
            nvg__vset(dst, p1->x - dlx0 * rw, p1->y - dly0 * rw, ru, 1);
            dst++;

            nvg__vset(dst, rx0, ry0, ru, 1);
            dst++;
            nvg__vset(dst, rx0, ry0, ru, 1);
            dst++;

            nvg__vset(dst, p1->x, p1->y, 0.5f, 1);
            dst++;
            nvg__vset(dst, p1->x - dlx1 * rw, p1->y - dly1 * rw, ru, 1);
            dst++;
        }

        nvg__vset(dst, lx1, ly1, lu, 1);
        dst++;
        nvg__vset(dst, p1->x - dlx1 * rw, p1->y - dly1 * rw, ru, 1);
        dst++;

    }
    else {
        /* Right turn: mirror of the branch above. */
        nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, -rw, &rx0, &ry0, &rx1, &ry1);

        nvg__vset(dst, p1->x + dlx0 * lw, p1->y + dly0 * lw, lu, 1);
        dst++;
        nvg__vset(dst, rx0, ry0, ru, 1);
        dst++;

        if(p1->flags & NVG_PT_BEVEL) {
            nvg__vset(dst, p1->x + dlx0 * lw, p1->y + dly0 * lw, lu, 1);
            dst++;
            nvg__vset(dst, rx0, ry0, ru, 1);
            dst++;

            nvg__vset(dst, p1->x + dlx1 * lw, p1->y + dly1 * lw, lu, 1);
            dst++;
            nvg__vset(dst, rx1, ry1, ru, 1);
            dst++;
        }
        else {
            lx0 = p1->x + p1->dmx * lw;
            ly0 = p1->y + p1->dmy * lw;

            nvg__vset(dst, p1->x + dlx0 * lw, p1->y + dly0 * lw, lu, 1);
            dst++;
            nvg__vset(dst, p1->x, p1->y, 0.5f, 1);
            dst++;

            nvg__vset(dst, lx0, ly0, lu, 1);
            dst++;
            nvg__vset(dst, lx0, ly0, lu, 1);
            dst++;

            nvg__vset(dst, p1->x + dlx1 * lw, p1->y + dly1 * lw, lu, 1);
            dst++;
            nvg__vset(dst, p1->x, p1->y, 0.5f, 1);
            dst++;
        }

        nvg__vset(dst, p1->x + dlx1 * lw, p1->y + dly1 * lw, lu, 1);
        dst++;
        nvg__vset(dst, rx1, ry1, ru, 1);
        dst++;
    }

    return dst;
}

/*
 * Butt/square start cap: a quad at the first point, offset back by d along
 * the segment direction, plus an antialias fringe row (u coords u0/u1).
 */
static NVGvertex * nvg__buttCapStart(NVGvertex * dst, NVGpoint * p,
                                     float dx, float dy, float w, float d,
                                     float aa, float u0, float u1)
{
    float px = p->x - dx * d;
    float py = p->y - dy * d;
    float dlx = dy;
    float dly = -dx;
    nvg__vset(dst, px + dlx * w - dx * aa, py + dly * w - dy * aa, u0, 0);
    dst++;
    nvg__vset(dst, px - dlx * w - dx * aa, py - dly * w - dy * aa, u1, 0);
    dst++;
    nvg__vset(dst, px + dlx * w, py + dly * w, u0, 1);
    dst++;
    nvg__vset(dst, px - dlx * w, py - dly * w, u1, 1);
    dst++;
    return dst;
}

/* Butt/square end cap: mirror of nvg__buttCapStart at the path's last point. */
static NVGvertex * nvg__buttCapEnd(NVGvertex * dst, NVGpoint * p,
                                   float dx, float dy, float w, float d,
                                   float aa, float u0, float u1)
{
    float px = p->x + dx * d;
    float py = p->y + dy * d;
    float dlx = dy;
    float dly = -dx;
    nvg__vset(dst, px + dlx * w, py + dly * w, u0, 1);
    dst++;
    nvg__vset(dst, px - dlx * w, py - dly * w, u1, 1);
    dst++;
    nvg__vset(dst, px + dlx * w + dx * aa, py + dly * w + dy * aa, u0, 0);
    dst++;
    nvg__vset(dst, px - dlx * w + dx * aa, py - dly * w + dy * aa, u1, 0);
    dst++;
    return dst;
}


/* Round start cap: half-circle fan of ncap segments around the first point. */
static NVGvertex * nvg__roundCapStart(NVGvertex * dst, NVGpoint * p,
                                      float dx, float dy, float w, int ncap,
                                      float aa, float u0, float u1)
{
    int i;
    float px = p->x;
    float py = p->y;
    float dlx = dy;
    float dly = -dx;
    NVG_NOTUSED(aa);
    for(i = 0; i < ncap; i++) {
        float a = i / (float)(ncap - 1) * NVG_PI;
        float ax = cosf(a) * w, ay = sinf(a) * w;
        nvg__vset(dst, px - dlx * ax - dx * ay, py - dly * ax - dy * ay, u0, 1);
        dst++;
        nvg__vset(dst, px, py, 0.5f, 1);
        dst++;
    }
    nvg__vset(dst, px + dlx * w, py + dly * w, u0, 1);
    dst++;
    nvg__vset(dst, px - dlx * w, py - dly * w, u1, 1);
    dst++;
    return dst;
}

/* Round end cap: half-circle fan of ncap segments around the last point. */
static NVGvertex * nvg__roundCapEnd(NVGvertex * dst, NVGpoint * p,
                                    float dx, float dy, float w, int ncap,
                                    float aa, float u0, float u1)
{
    int i;
    float px = p->x;
    float py = p->y;
    float dlx = dy;
    float dly = -dx;
    NVG_NOTUSED(aa);
    nvg__vset(dst, px + dlx * w, py + dly * w, u0, 1);
    dst++;
    nvg__vset(dst, px - dlx * w, py - dly * w, u1, 1);
    dst++;
    for(i = 0; i < ncap; i++) {
        float a = i / (float)(ncap - 1) * NVG_PI;
        float ax = cosf(a) * w, ay = sinf(a) * w;
        nvg__vset(dst, px, py, 0.5f, 1);
        dst++;
        nvg__vset(dst, px - dlx * ax + dx * ay, py - dly * ax + dy * ay, u0, 1);
        dst++;
    }
    return dst;
}


/*
 * Classify every joint of every cached path: compute the extrusion vector
 * (dmx,dmy), decide bevel vs. miter per joint, count bevels, and mark each
 * path convex when all turns are left turns. w is the half stroke width
 * used for the inner-bevel limit; iw is its reciprocal (0 if w == 0).
 */
static void nvg__calculateJoins(NVGcontext * ctx, float w, int lineJoin, float miterLimit)
{
    NVGpathCache * cache = ctx->cache;
    int i, j;
    float iw = 0.0f;

    if(w > 0.0f) iw = 1.0f / w;

    // Calculate which joins needs extra vertices to append, and gather vertex count.
    for(i = 0; i < cache->npaths; i++) {
        NVGpath * path = &cache->paths[i];
        NVGpoint * pts = &cache->points[path->first];
        NVGpoint * p0 = &pts[path->count - 1];
        NVGpoint * p1 = &pts[0];
        int nleft = 0;

        path->nbevel = 0;

        for(j = 0; j < path->count; j++) {
            float dlx0, dly0, dlx1, dly1, dmr2, cross, limit;
            dlx0 = p0->dy;
            dly0 = -p0->dx;
            dlx1 = p1->dy;
            dly1 = -p1->dx;
            // Calculate extrusions
            p1->dmx = (dlx0 + dlx1) * 0.5f;
            p1->dmy = (dly0 + dly1) * 0.5f;
            dmr2 = p1->dmx * p1->dmx + p1->dmy * p1->dmy;
            if(dmr2 > 0.000001f) {
                /* Normalize the extrusion; cap the scale to avoid spikes at
                 * near-180-degree joints. */
                float scale = 1.0f / dmr2;
                if(scale > 600.0f) {
                    scale = 600.0f;
                }
                p1->dmx *= scale;
                p1->dmy *= scale;
            }

            // Clear flags, but keep the corner.
            p1->flags = (p1->flags & NVG_PT_CORNER) ? NVG_PT_CORNER : 0;

            // Keep track of left turns.
            cross = p1->dx * p0->dy - p0->dx * p1->dy;
            if(cross > 0.0f) {
                nleft++;
                p1->flags |= NVG_PT_LEFT;
            }

            // Calculate if we should use bevel or miter for inner join.
            limit = nvg__maxf(1.01f, nvg__minf(p0->len, p1->len) * iw);
            if((dmr2 * limit * limit) < 1.0f)
                p1->flags |= NVG_PR_INNERBEVEL;

            // Check to see if the corner needs to be beveled.
            if(p1->flags & NVG_PT_CORNER) {
                if((dmr2 * miterLimit * miterLimit) < 1.0f || lineJoin == NVG_BEVEL || lineJoin == NVG_ROUND) {
                    p1->flags |= NVG_PT_BEVEL;
                }
            }

            if((p1->flags & (NVG_PT_BEVEL | NVG_PR_INNERBEVEL)) != 0)
                path->nbevel++;

            p0 = p1++;
        }

        path->convex = (nleft == path->count) ? 1 : 0;
    }
}


/*
 * Tessellate every cached path into stroke geometry (triangle strips stored
 * in path->stroke/nstroke). w is the half stroke width, fringe the AA width;
 * returns 0 only when the temporary vertex buffer cannot be allocated.
 */
static int nvg__expandStroke(NVGcontext * ctx, float w, float fringe, int lineCap, int lineJoin, float miterLimit)
{
    NVGpathCache * cache = ctx->cache;
    NVGvertex * verts;
    NVGvertex * dst;
    int cverts, i, j;
    float aa = fringe;//ctx->fringeWidth;
    float u0 = 0.0f, u1 = 1.0f;
    int ncap = nvg__curveDivs(w, NVG_PI, ctx->tessTol); // Calculate divisions per half circle.

    w += aa * 0.5f;

    // Disable the gradient used for antialiasing when antialiasing is not used.
    if(aa == 0.0f) {
        u0 = 0.5f;
        u1 = 0.5f;
    }

    nvg__calculateJoins(ctx, w, lineJoin, miterLimit);

    // Calculate max vertex usage.
    cverts = 0;
    for(i = 0; i < cache->npaths; i++) {
        NVGpath * path = &cache->paths[i];
        int loop = (path->closed == 0) ? 0 : 1;
        if(lineJoin == NVG_ROUND)
            cverts += (path->count + path->nbevel * (ncap + 2) + 1) * 2; // plus one for loop
        else
            cverts += (path->count + path->nbevel * 5 + 1) * 2; // plus one for loop
        if(loop == 0) {
            // space for caps
            if(lineCap == NVG_ROUND) {
                cverts += (ncap * 2 + 2) * 2;
            }
            else {
                cverts += (3 + 3) * 2;
            }
        }
    }

    verts = nvg__allocTempVerts(ctx, cverts);
    if(verts == NULL) return 0;

    for(i = 0; i < cache->npaths; i++) {
        NVGpath * path = &cache->paths[i];
        NVGpoint * pts = &cache->points[path->first];
        NVGpoint * p0;
        NVGpoint * p1;
        int s, e, loop;
        float dx, dy;

        path->fill = 0;
        path->nfill = 0;

        // Calculate fringe or stroke
        loop = (path->closed == 0) ? 0 : 1;
        dst = verts;
        path->stroke = dst;

        if(loop) {
            // Looping
            p0 = &pts[path->count - 1];
            p1 = &pts[0];
            s = 0;
            e = path->count;
        }
        else {
            // Add cap
            p0 = &pts[0];
            p1 = &pts[1];
            s = 1;
            e = path->count - 1;
        }

        if(loop == 0) {
            // Add cap
            dx = p1->x - p0->x;
            dy = p1->y - p0->y;
            nvg__normalize(&dx, &dy);
            if(lineCap == NVG_BUTT)
                dst = nvg__buttCapStart(dst, p0, dx, dy, w, -aa * 0.5f, aa, u0, u1);
            /* NOTE(review): the NVG_BUTT disjunct below is unreachable (handled
             * above); it matches upstream NanoVG verbatim — keep in sync. */
            else if(lineCap == NVG_BUTT || lineCap == NVG_SQUARE)
                dst = nvg__buttCapStart(dst, p0, dx, dy, w, w - aa, aa, u0, u1);
            else if(lineCap == NVG_ROUND)
                dst = nvg__roundCapStart(dst, p0, dx, dy, w, ncap, aa, u0, u1);
        }

        for(j = s; j < e; ++j) {
            if((p1->flags & (NVG_PT_BEVEL | NVG_PR_INNERBEVEL)) != 0) {
                if(lineJoin == NVG_ROUND) {
                    dst = nvg__roundJoin(dst, p0, p1, w, w, u0, u1, ncap, aa);
                }
                else {
                    dst = nvg__bevelJoin(dst, p0, p1, w, w, u0, u1, aa);
                }
            }
            else {
                nvg__vset(dst, p1->x + (p1->dmx * w), p1->y + (p1->dmy * w), u0, 1);
                dst++;
                nvg__vset(dst, p1->x - (p1->dmx * w), p1->y - (p1->dmy * w), u1, 1);
                dst++;
            }
            p0 = p1++;
        }

        if(loop) {
            // Loop it
            nvg__vset(dst, verts[0].x, verts[0].y, u0, 1);
            dst++;
            nvg__vset(dst, verts[1].x, verts[1].y, u1, 1);
            dst++;
        }
        else {
            // Add cap
            dx = p1->x - p0->x;
            dy = p1->y - p0->y;
            nvg__normalize(&dx, &dy);
            if(lineCap == NVG_BUTT)
                dst = nvg__buttCapEnd(dst, p1, dx, dy, w, -aa * 0.5f, aa, u0, u1);
            /* NOTE(review): same unreachable NVG_BUTT disjunct as the start cap
             * above; kept identical to upstream NanoVG. */
            else if(lineCap == NVG_BUTT || lineCap == NVG_SQUARE)
                dst = nvg__buttCapEnd(dst, p1, dx, dy, w, w - aa, aa, u0, u1);
            else if(lineCap == NVG_ROUND)
                dst = nvg__roundCapEnd(dst, p1, dx, dy, w, ncap, aa, u0, u1);
        }

        path->nstroke = (int)(dst - verts);

        verts = dst;
    }

    return 1;
}

/*
 * Tessellate every cached path into fill geometry (path->fill/nfill) plus an
 * optional antialias fringe strip (path->stroke/nstroke) when w > 0.
 * Returns 0 only when the temporary vertex buffer cannot be allocated.
 */
static int nvg__expandFill(NVGcontext * ctx, float w, int lineJoin, float miterLimit)
{
    NVGpathCache * cache = ctx->cache;
    NVGvertex * verts;
    NVGvertex * dst;
    int cverts, convex, i, j;
    float aa = ctx->fringeWidth;
    int fringe = w > 0.0f;

    nvg__calculateJoins(ctx, w, lineJoin, miterLimit);

    // Calculate max vertex usage.
    cverts = 0;
    for(i = 0; i < cache->npaths; i++) {
        NVGpath * path = &cache->paths[i];
        cverts += path->count + path->nbevel + 1;
        if(fringe)
            cverts += (path->count + path->nbevel * 5 + 1) * 2; // plus one for loop
    }

    verts = nvg__allocTempVerts(ctx, cverts);
    if(verts == NULL) return 0;

    convex = cache->npaths == 1 && cache->paths[0].convex;

    for(i = 0; i < cache->npaths; i++) {
        NVGpath * path = &cache->paths[i];
        NVGpoint * pts = &cache->points[path->first];
        NVGpoint * p0;
        NVGpoint * p1;
        float rw, lw, woff;
        float ru, lu;

        // Calculate shape vertices.
        woff = 0.5f * aa;
        dst = verts;
        path->fill = dst;

        if(fringe) {
            // Looping
            p0 = &pts[path->count - 1];
            p1 = &pts[0];
            for(j = 0; j < path->count; ++j) {
                if(p1->flags & NVG_PT_BEVEL) {
                    float dlx0 = p0->dy;
                    float dly0 = -p0->dx;
                    float dlx1 = p1->dy;
                    float dly1 = -p1->dx;
                    if(p1->flags & NVG_PT_LEFT) {
                        float lx = p1->x + p1->dmx * woff;
                        float ly = p1->y + p1->dmy * woff;
                        nvg__vset(dst, lx, ly, 0.5f, 1);
                        dst++;
                    }
                    else {
                        float lx0 = p1->x + dlx0 * woff;
                        float ly0 = p1->y + dly0 * woff;
                        float lx1 = p1->x + dlx1 * woff;
                        float ly1 = p1->y + dly1 * woff;
                        nvg__vset(dst, lx0, ly0, 0.5f, 1);
                        dst++;
                        nvg__vset(dst, lx1, ly1, 0.5f, 1);
                        dst++;
                    }
                }
                else {
                    nvg__vset(dst, p1->x + (p1->dmx * woff), p1->y + (p1->dmy * woff), 0.5f, 1);
                    dst++;
                }
                p0 = p1++;
            }
        }
        else {
            for(j = 0; j < path->count; ++j) {
                nvg__vset(dst, pts[j].x, pts[j].y, 0.5f, 1);
                dst++;
            }
        }

        path->nfill = (int)(dst - verts);
        verts = dst;

        // Calculate fringe
        if(fringe) {
            lw = w + woff;
            rw = w - woff;
            lu = 0;
            ru = 1;
            dst = verts;
            path->stroke = dst;

            // Create only half a fringe for convex shapes so that
            // the shape can be rendered without stenciling.
            if(convex) {
                lw = woff; // This should generate the same vertex as fill inset above.
                lu = 0.5f; // Set outline fade at middle.
            }

            // Looping
            p0 = &pts[path->count - 1];
            p1 = &pts[0];

            for(j = 0; j < path->count; ++j) {
                if((p1->flags & (NVG_PT_BEVEL | NVG_PR_INNERBEVEL)) != 0) {
                    dst = nvg__bevelJoin(dst, p0, p1, lw, rw, lu, ru, ctx->fringeWidth);
                }
                else {
                    nvg__vset(dst, p1->x + (p1->dmx * lw), p1->y + (p1->dmy * lw), lu, 1);
                    dst++;
                    nvg__vset(dst, p1->x - (p1->dmx * rw), p1->y - (p1->dmy * rw), ru, 1);
                    dst++;
                }
                p0 = p1++;
            }

            // Loop it
            nvg__vset(dst, verts[0].x, verts[0].y, lu, 1);
            dst++;
            nvg__vset(dst, verts[1].x, verts[1].y, ru, 1);
            dst++;

            path->nstroke = (int)(dst - verts);
            verts = dst;
        }
        else {
            path->stroke = NULL;
            path->nstroke = 0;
        }
    }

    return 1;
}


// Draw
/* Start a new path: clear the command buffer and the tessellation cache. */
void nvgBeginPath(NVGcontext * ctx)
{
    ctx->ncommands = 0;
    nvg__clearPathCache(ctx);
}

/* Start a new sub-path at (x, y). */
void nvgMoveTo(NVGcontext * ctx, float x, float y)
{
    float vals[] = { NVG_MOVETO, x, y };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/* Add a line segment from the last point to (x, y). */
void nvgLineTo(NVGcontext * ctx, float x, float y)
{
    float vals[] = { NVG_LINETO, x, y };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/* Add a cubic bezier from the last point via control points c1 and c2. */
void nvgBezierTo(NVGcontext * ctx, float c1x, float c1y, float c2x, float c2y, float x, float y)
{
    float vals[] = { NVG_BEZIERTO, c1x, c1y, c2x, c2y, x, y };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/* Add a quadratic bezier, converted to the equivalent cubic. */
void nvgQuadTo(NVGcontext * ctx, float cx, float cy, float x, float y)
{
    float x0 = ctx->commandx;
    float y0 = ctx->commandy;
    float vals[] = { NVG_BEZIERTO,
                     x0 + 2.0f / 3.0f * (cx - x0), y0 + 2.0f / 3.0f * (cy - y0),
                     x + 2.0f / 3.0f * (cx - x), y + 2.0f / 3.0f * (cy - y),
                     x, y
                   };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/*
 * Add an arc segment that connects the previous point to (x2, y2) via the
 * corner at (x1, y1), rounded with the given radius. Degenerates to a line
 * when the points are (nearly) collinear or the radius is tiny.
 */
void nvgArcTo(NVGcontext * ctx, float x1, float y1, float x2, float y2, float radius)
{
    float x0 = ctx->commandx;
    float y0 = ctx->commandy;
    float dx0, dy0, dx1, dy1, a, d, cx, cy, a0, a1;
    int dir;

    if(ctx->ncommands == 0) {
        return;
    }

    // Handle degenerate cases.
    if(nvg__ptEquals(x0, y0, x1, y1, ctx->distTol) ||
       nvg__ptEquals(x1, y1, x2, y2, ctx->distTol) ||
       nvg__distPtSeg(x1, y1, x0, y0, x2, y2) < ctx->distTol * ctx->distTol ||
       radius < ctx->distTol) {
        nvgLineTo(ctx, x1, y1);
        return;
    }

    // Calculate tangential circle to lines (x0,y0)-(x1,y1) and (x1,y1)-(x2,y2).
    dx0 = x0 - x1;
    dy0 = y0 - y1;
    dx1 = x2 - x1;
    dy1 = y2 - y1;
    nvg__normalize(&dx0, &dy0);
    nvg__normalize(&dx1, &dy1);
    a = nvg__acosf(dx0 * dx1 + dy0 * dy1);
    d = radius / nvg__tanf(a / 2.0f);

    if(d > 10000.0f) {
        nvgLineTo(ctx, x1, y1);
        return;
    }

    if(nvg__cross(dx0, dy0, dx1, dy1) > 0.0f) {
        cx = x1 + dx0 * d + dy0 * radius;
        cy = y1 + dy0 * d + -dx0 * radius;
        a0 = nvg__atan2f(dx0, -dy0);
        a1 = nvg__atan2f(-dx1, dy1);
        dir = NVG_CW;
    }
    else {
        cx = x1 + dx0 * d + -dy0 * radius;
        cy = y1 + dy0 * d + dx0 * radius;
        a0 = nvg__atan2f(-dx0, dy0);
        a1 = nvg__atan2f(dx1, -dy1);
        dir = NVG_CCW;
    }

    nvgArc(ctx, cx, cy, radius, a0, a1, dir);
}

/* Close the current sub-path with a line back to its first point. */
void nvgClosePath(NVGcontext * ctx)
{
    float vals[] = { NVG_CLOSE };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/* Set the winding (NVG_CCW solid / NVG_CW hole) of the current sub-path. */
void nvgPathWinding(NVGcontext * ctx, int dir)
{
    float vals[] = { NVG_WINDING, (float)dir };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/*
 * Append an arc of circle (cx, cy, r) from angle a0 to a1 (radians) in the
 * given direction, approximated with cubic beziers of at most 90 degrees.
 */
void nvgArc(NVGcontext * ctx, float cx, float cy, float r, float a0, float a1, int dir)
{
    float a = 0, da = 0, hda = 0, kappa = 0;
    float dx = 0, dy = 0, x = 0, y = 0, tanx = 0, tany = 0;
    float px = 0, py = 0, ptanx = 0, ptany = 0;
    float vals[3 + 5 * 7 + 100];
    int i, ndivs, nvals;
    int move = ctx->ncommands > 0 ? NVG_LINETO : NVG_MOVETO;

    // Clamp angles
    da = a1 - a0;
    if(dir == NVG_CW) {
        if(nvg__absf(da) >= NVG_PI * 2) {
            da = NVG_PI * 2;
        }
        else {
            while(da < 0.0f) da += NVG_PI * 2;
        }
    }
    else {
        if(nvg__absf(da) >= NVG_PI * 2) {
            da = -NVG_PI * 2;
        }
        else {
            while(da > 0.0f) da -= NVG_PI * 2;
        }
    }

    // Split arc into max 90 degree segments.
    ndivs = nvg__maxi(1, nvg__mini((int)(nvg__absf(da) / (NVG_PI * 0.5f) + 0.5f), 5));
    hda = (da / (float)ndivs) / 2.0f;
    kappa = nvg__absf(4.0f / 3.0f * (1.0f - nvg__cosf(hda)) / nvg__sinf(hda));

    if(dir == NVG_CCW)
        kappa = -kappa;

    nvals = 0;
    for(i = 0; i <= ndivs; i++) {
        a = a0 + da * (i / (float)ndivs);
        dx = nvg__cosf(a);
        dy = nvg__sinf(a);
        x = cx + dx * r;
        y = cy + dy * r;
        tanx = -dy * r * kappa;
        tany = dx * r * kappa;

        if(i == 0) {
            vals[nvals++] = (float)move;
            vals[nvals++] = x;
            vals[nvals++] = y;
        }
        else {
            vals[nvals++] = NVG_BEZIERTO;
            vals[nvals++] = px + ptanx;
            vals[nvals++] = py + ptany;
            vals[nvals++] = x - tanx;
            vals[nvals++] = y - tany;
            vals[nvals++] = x;
            vals[nvals++] = y;
        }
        px = x;
        py = y;
        ptanx = tanx;
        ptany = tany;
    }

    nvg__appendCommands(ctx, vals, nvals);
}

/* Add an axis-aligned rectangle sub-path. */
void nvgRect(NVGcontext * ctx, float x, float y, float w, float h)
{
    float vals[] = {
        NVG_MOVETO, x, y,
        NVG_LINETO, x, y + h,
        NVG_LINETO, x + w, y + h,
        NVG_LINETO, x + w, y,
        NVG_CLOSE
    };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/* Add a rounded rectangle with the same radius on all four corners. */
void nvgRoundedRect(NVGcontext * ctx, float x, float y, float w, float h, float r)
{
    nvgRoundedRectVarying(ctx, x, y, w, h, r, r, r, r);
}

/* Add a rounded rectangle with a separate radius per corner; radii are
 * clamped to half the rectangle size and follow the sign of w/h. */
void nvgRoundedRectVarying(NVGcontext * ctx, float x, float y, float w, float h, float radTopLeft, float radTopRight,
                           float radBottomRight, float radBottomLeft)
{
    if(radTopLeft < 0.1f && radTopRight < 0.1f && radBottomRight < 0.1f && radBottomLeft < 0.1f) {
        nvgRect(ctx, x, y, w, h);
        return;
    }
    else {
        float halfw = nvg__absf(w) * 0.5f;
        float halfh = nvg__absf(h) * 0.5f;
        float rxBL = nvg__minf(radBottomLeft, halfw) * nvg__signf(w), ryBL = nvg__minf(radBottomLeft, halfh) * nvg__signf(h);
        float rxBR = nvg__minf(radBottomRight, halfw) * nvg__signf(w), ryBR = nvg__minf(radBottomRight, halfh) * nvg__signf(h);
        float rxTR = nvg__minf(radTopRight, halfw) * nvg__signf(w), ryTR = nvg__minf(radTopRight, halfh) * nvg__signf(h);
        float rxTL = nvg__minf(radTopLeft, halfw) * nvg__signf(w), ryTL = nvg__minf(radTopLeft, halfh) * nvg__signf(h);
        float vals[] = {
            NVG_MOVETO, x, y + ryTL,
            NVG_LINETO, x, y + h - ryBL,
            NVG_BEZIERTO, x, y + h - ryBL * (1 - NVG_KAPPA90), x + rxBL * (1 - NVG_KAPPA90), y + h, x + rxBL, y + h,
            NVG_LINETO, x + w - rxBR, y + h,
            NVG_BEZIERTO, x + w - rxBR * (1 - NVG_KAPPA90), y + h, x + w, y + h - ryBR * (1 - NVG_KAPPA90), x + w, y + h - ryBR,
            NVG_LINETO, x + w, y + ryTR,
            NVG_BEZIERTO, x + w, y + ryTR * (1 - NVG_KAPPA90), x + w - rxTR * (1 - NVG_KAPPA90), y, x + w - rxTR, y,
            NVG_LINETO, x + rxTL, y,
            NVG_BEZIERTO, x + rxTL * (1 - NVG_KAPPA90), y, x, y + ryTL * (1 - NVG_KAPPA90), x, y + ryTL,
            NVG_CLOSE
        };
        nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
    }
}

/* Add an axis-aligned ellipse sub-path built from four bezier segments. */
void nvgEllipse(NVGcontext * ctx, float cx, float cy, float rx, float ry)
{
    float vals[] = {
        NVG_MOVETO, cx - rx, cy,
        NVG_BEZIERTO, cx - rx, cy + ry * NVG_KAPPA90, cx - rx * NVG_KAPPA90, cy + ry, cx, cy + ry,
        NVG_BEZIERTO, cx + rx * NVG_KAPPA90, cy + ry, cx + rx, cy + ry * NVG_KAPPA90, cx + rx, cy,
        NVG_BEZIERTO, cx + rx, cy - ry * NVG_KAPPA90, cx + rx * NVG_KAPPA90, cy - ry, cx, cy - ry,
        NVG_BEZIERTO, cx - rx * NVG_KAPPA90, cy - ry, cx - rx, cy - ry * NVG_KAPPA90, cx - rx, cy,
        NVG_CLOSE
    };
    nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals));
}

/* Add a circle sub-path (an ellipse with equal radii). */
void nvgCircle(NVGcontext * ctx, float cx, float cy, float r)
{
    nvgEllipse(ctx, cx, cy, r, r);
}

/* Debug helper: log every tessellated fill/stroke vertex of the path cache. */
void nvgDebugDumpPathCache(NVGcontext * ctx)
{
    const NVGpath * path;
    int i, j;

    LV_LOG_USER("Dumping %d cached paths", ctx->cache->npaths);
    for(i = 0; i < ctx->cache->npaths; i++) {
        path = &ctx->cache->paths[i];
        LV_LOG_USER(" - Path %d", i);
        if(path->nfill) {
            LV_LOG_USER(" - fill: %d", path->nfill);
            for(j = 0; j < path->nfill; j++)
                LV_LOG_USER("%f\t%f", path->fill[j].x, path->fill[j].y);
        }
        if(path->nstroke) {
            LV_LOG_USER(" - stroke: %d", path->nstroke);
            for(j = 0; j < path->nstroke; j++)
                LV_LOG_USER("%f\t%f", path->stroke[j].x, path->stroke[j].y);
        }
    }
}

/*
 * Fill the current path with the current fill paint: flatten commands,
 * expand into fill geometry (with AA fringe when enabled), apply the global
 * alpha and hand the geometry to the backend via params.renderFill().
 */
void nvgFill(NVGcontext * ctx)
{
    NVGstate * state = nvg__getState(ctx);
    const NVGpath * path;
    NVGpaint fillPaint = state->fill;
    int i;

    nvg__flattenPaths(ctx);
    if(ctx->params.edgeAntiAlias && state->shapeAntiAlias)
        nvg__expandFill(ctx, ctx->fringeWidth, NVG_MITER, 2.4f);
    else
        nvg__expandFill(ctx, 0.0f, NVG_MITER, 2.4f);

    // Apply global alpha
    fillPaint.innerColor.ch.a *= state->alpha;
    fillPaint.outerColor.ch.a *= state->alpha;

    ctx->params.renderFill(ctx->params.userPtr, &fillPaint, state->compositeOperation, &state->scissor, ctx->fringeWidth,
                           ctx->cache->bounds, ctx->cache->paths, ctx->cache->npaths);

    // Count triangles
    for(i = 0; i < ctx->cache->npaths; i++) {
        path = &ctx->cache->paths[i];
        ctx->fillTriCount += path->nfill - 2;
        ctx->fillTriCount += path->nstroke - 2;
        ctx->drawCallCount += 2;
    }
}

/*
 * Stroke the current path with the current stroke paint. Sub-pixel widths
 * are emulated by scaling alpha instead of thinning the geometry. The
 * expanded geometry is handed to the backend via params.renderStroke().
 */
void nvgStroke(NVGcontext * ctx)
{
    NVGstate * state = nvg__getState(ctx);
    float scale = nvg__getAverageScale(state->xform);
    float strokeWidth = nvg__clampf(state->strokeWidth * scale, 0.0f, 200.0f);
    NVGpaint strokePaint = state->stroke;
    const NVGpath * path;
    int i;


    if(strokeWidth < ctx->fringeWidth) {
        // If the stroke width is less than pixel size, use alpha to emulate coverage.
        // Since coverage is area, scale by alpha*alpha.
        float alpha = nvg__clampf(strokeWidth / ctx->fringeWidth, 0.0f, 1.0f);
        strokePaint.innerColor.ch.a *= alpha * alpha;
        strokePaint.outerColor.ch.a *= alpha * alpha;
        strokeWidth = ctx->fringeWidth;
    }

    // Apply global alpha
    strokePaint.innerColor.ch.a *= state->alpha;
    strokePaint.outerColor.ch.a *= state->alpha;

    nvg__flattenPaths(ctx);

    if(ctx->params.edgeAntiAlias && state->shapeAntiAlias)
        nvg__expandStroke(ctx, strokeWidth * 0.5f, ctx->fringeWidth, state->lineCap, state->lineJoin, state->miterLimit);
    else
        nvg__expandStroke(ctx, strokeWidth * 0.5f, 0.0f, state->lineCap, state->lineJoin, state->miterLimit);

    ctx->params.renderStroke(ctx->params.userPtr, &strokePaint, state->compositeOperation, &state->scissor,
                             ctx->fringeWidth,
                             strokeWidth, ctx->cache->paths, ctx->cache->npaths);

    // Count triangles
    for(i = 0; i < ctx->cache->npaths; i++) {
        path = &ctx->cache->paths[i];
        ctx->strokeTriCount += path->nstroke - 2;
        ctx->drawCallCount++;
    }
}

// Add fonts
/* Font/text support is not compiled into this LVGL port of NanoVG: the
 * functions below are stubs. Creation functions return -1 (failure),
 * fallback management returns 0, and metrics/bounds return 0 / no-op. */
int nvgCreateFont(NVGcontext * ctx, const char * name, const char * filename)
{
    (void)ctx;
    (void)name;
    (void)filename;
    return -1;
}

int nvgCreateFontAtIndex(NVGcontext * ctx, const char * name, const char * filename, const int fontIndex)
{
    (void)ctx;
    (void)name;
    (void)filename;
    (void)fontIndex;
    return -1;
}

int nvgCreateFontMem(NVGcontext * ctx, const char * name, unsigned char * data, int ndata, int freeData)
{
    (void)ctx;
    (void)name;
    (void)data;
    (void)ndata;
    (void)freeData;
    return -1;
}

int nvgCreateFontMemAtIndex(NVGcontext * ctx, const char * name, unsigned char * data, int ndata, int freeData,
                            const int fontIndex)
{
    (void)ctx;
    (void)name;
    (void)data;
    (void)ndata;
    (void)freeData;
    (void)fontIndex;
    return -1;
}

int nvgFindFont(NVGcontext * ctx, const char * name)
{
    (void)ctx;
    (void)name;
    return -1;
}


int nvgAddFallbackFontId(NVGcontext * ctx, int baseFont, int fallbackFont)
{
    (void)ctx;
    (void)baseFont;
    (void)fallbackFont;
    return 0;
}

int nvgAddFallbackFont(NVGcontext * ctx, const char * baseFont, const char * fallbackFont)
{
    (void)ctx;
    (void)baseFont;
    (void)fallbackFont;
    return 0;
}

void nvgResetFallbackFontsId(NVGcontext * ctx, int baseFont)
{
    (void)ctx;
    (void)baseFont;
}

void nvgResetFallbackFonts(NVGcontext * ctx, const char * baseFont)
{
    (void)ctx;
    (void)baseFont;
}

// State setting
/* Text-state setters below store the value on the current state even though
 * text rendering itself is stubbed out in this port. */
void nvgFontSize(NVGcontext * ctx, float size)
{
    NVGstate * state = nvg__getState(ctx);
    state->fontSize = size;
}

void nvgFontBlur(NVGcontext * ctx, float blur)
{
    NVGstate * state = nvg__getState(ctx);
    state->fontBlur = blur;
}

void nvgTextLetterSpacing(NVGcontext * ctx, float spacing)
{
    NVGstate * state = nvg__getState(ctx);
    state->letterSpacing = spacing;
}

void nvgTextLineHeight(NVGcontext * ctx, float lineHeight)
{
    NVGstate * state = nvg__getState(ctx);
    state->lineHeight = lineHeight;
}

void nvgTextAlign(NVGcontext * ctx, int align)
{
    NVGstate * state = nvg__getState(ctx);
    state->textAlign = align;
}

void nvgFontFaceId(NVGcontext * ctx, int font)
{
    NVGstate * state = nvg__getState(ctx);
    state->fontId = font;
}

void nvgFontFace(NVGcontext * ctx, const char * font)
{
    (void)ctx;
    (void)font;
}

float nvgText(NVGcontext * ctx, float x, float y, const char * string, const char * end)
{
    (void)ctx;
    (void)x;
    (void)y;
    (void)string;
    (void)end;
    return 0;
}

void nvgTextBox(NVGcontext * ctx, float x, float y, float breakRowWidth, const char * string, const char * end)
{
    (void)ctx;
    (void)x;
    (void)y;
    (void)breakRowWidth;
    (void)string;
    (void)end;
}

int nvgTextBreakLines(NVGcontext * ctx, const char * string, const char * end, float breakRowWidth, NVGtextRow * rows,
                      int maxRows)
{
    (void)ctx;
    (void)string;
    (void)end;
    (void)breakRowWidth;
    (void)rows;
    (void)maxRows;
    return 0;
}

float nvgTextBounds(NVGcontext * ctx, float x, float y, const char * string, const char * end, float * bounds)
{
    (void)ctx;
    (void)x;
    (void)y;
    (void)string;
    (void)end;
    (void)bounds;
    return 0;
}

void nvgTextBoxBounds(NVGcontext * ctx, float x, float y, float breakRowWidth, const char * string, const char * end,
                      float * bounds)
{
    (void)ctx;
    (void)x;
    (void)y;
    (void)breakRowWidth;
    (void)string;
    (void)end;
    (void)bounds;
}

void nvgTextMetrics(NVGcontext * ctx, float * ascender, float * descender, float * lineh)
{
    (void)ctx;
    (void)ascender;
    (void)descender;
    (void)lineh;
}
// vim: ft=c nu noet ts=4

#endif /* LV_USE_NANOVG */
diff --git a/src/libs/nanovg/nanovg.h b/src/libs/nanovg/nanovg.h
new file mode 100644
index 0000000000..ece8bd60be
--- /dev/null
+++ b/src/libs/nanovg/nanovg.h
@@ -0,0 +1,707 @@
//
// Copyright (c) 2013 Mikko Mononen memon@inside.org
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
+// + +#ifndef NANOVG_H +#define NANOVG_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "../../lv_conf_internal.h" + +#if LV_USE_NANOVG + +#define NVG_PI 3.14159265358979323846264338327f + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +#endif + +typedef struct NVGcontext NVGcontext; + +union NVGcolor { + float rgba[4]; + struct { + float r, g, b, a; + } ch; +}; +typedef union NVGcolor NVGcolor; + +struct NVGpaint { + float xform[6]; + float extent[2]; + float radius; + float feather; + NVGcolor innerColor; + NVGcolor outerColor; + int image; +}; +typedef struct NVGpaint NVGpaint; + +enum NVGwinding { + NVG_CCW = 1, // Winding for solid shapes + NVG_CW = 2, // Winding for holes +}; + +enum NVGsolidity { + NVG_SOLID = 1, // CCW + NVG_HOLE = 2, // CW +}; + +enum NVGlineCap { + NVG_BUTT, + NVG_ROUND, + NVG_SQUARE, + NVG_BEVEL, + NVG_MITER, +}; + +enum NVGalign { + // Horizontal align + NVG_ALIGN_LEFT = 1 << 0, // Default, align text horizontally to left. + NVG_ALIGN_CENTER = 1 << 1, // Align text horizontally to center. + NVG_ALIGN_RIGHT = 1 << 2, // Align text horizontally to right. + // Vertical align + NVG_ALIGN_TOP = 1 << 3, // Align text vertically to top. + NVG_ALIGN_MIDDLE = 1 << 4, // Align text vertically to middle. + NVG_ALIGN_BOTTOM = 1 << 5, // Align text vertically to bottom. + NVG_ALIGN_BASELINE = 1 << 6, // Default, align text vertically to baseline. 
+}; + +enum NVGblendFactor { + NVG_ZERO = 1 << 0, + NVG_ONE = 1 << 1, + NVG_SRC_COLOR = 1 << 2, + NVG_ONE_MINUS_SRC_COLOR = 1 << 3, + NVG_DST_COLOR = 1 << 4, + NVG_ONE_MINUS_DST_COLOR = 1 << 5, + NVG_SRC_ALPHA = 1 << 6, + NVG_ONE_MINUS_SRC_ALPHA = 1 << 7, + NVG_DST_ALPHA = 1 << 8, + NVG_ONE_MINUS_DST_ALPHA = 1 << 9, + NVG_SRC_ALPHA_SATURATE = 1 << 10, +}; + +enum NVGcompositeOperation { + NVG_SOURCE_OVER, + NVG_SOURCE_IN, + NVG_SOURCE_OUT, + NVG_ATOP, + NVG_DESTINATION_OVER, + NVG_DESTINATION_IN, + NVG_DESTINATION_OUT, + NVG_DESTINATION_ATOP, + NVG_LIGHTER, + NVG_COPY, + NVG_XOR, +}; + +struct NVGcompositeOperationState { + int srcRGB; + int dstRGB; + int srcAlpha; + int dstAlpha; +}; +typedef struct NVGcompositeOperationState NVGcompositeOperationState; + +struct NVGglyphPosition { + const char * str; // Position of the glyph in the input string. + float x; // The x-coordinate of the logical glyph position. + float minx, maxx; // The bounds of the glyph shape. +}; +typedef struct NVGglyphPosition NVGglyphPosition; + +struct NVGtextRow { + const char * start; // Pointer to the input text where the row starts. + const char * end; // Pointer to the input text where the row ends (one past the last character). + const char * next; // Pointer to the beginning of the next row. + float width; // Logical width of the row. + float minx, + maxx; // Actual bounds of the row. Logical with and bounds can differ because of kerning and some parts over extending. +}; +typedef struct NVGtextRow NVGtextRow; + +enum NVGimageFlags { + NVG_IMAGE_GENERATE_MIPMAPS = 1 << 0, // Generate mipmaps during creation of the image. + NVG_IMAGE_REPEATX = 1 << 1, // Repeat image in X direction. + NVG_IMAGE_REPEATY = 1 << 2, // Repeat image in Y direction. + NVG_IMAGE_FLIPY = 1 << 3, // Flips (inverses) image in Y direction when rendered. + NVG_IMAGE_PREMULTIPLIED = 1 << 4, // Image data has premultiplied alpha. 
+ NVG_IMAGE_NEAREST = 1 << 5, // Image interpolation is Nearest instead Linear +}; + +// Begin drawing a new frame +// Calls to nanovg drawing API should be wrapped in nvgBeginFrame() & nvgEndFrame() +// nvgBeginFrame() defines the size of the window to render to in relation currently +// set viewport (i.e. glViewport on GL backends). Device pixel ration allows to +// control the rendering on Hi-DPI devices. +// For example, GLFW returns two dimension for an opened window: window size and +// frame buffer size. In that case you would set windowWidth/Height to the window size +// devicePixelRatio to: frameBufferWidth / windowWidth. +void nvgBeginFrame(NVGcontext * ctx, float windowWidth, float windowHeight, float devicePixelRatio); + +// Cancels drawing the current frame. +void nvgCancelFrame(NVGcontext * ctx); + +// Ends drawing flushing remaining render state. +void nvgEndFrame(NVGcontext * ctx); + +// +// Composite operation +// +// The composite operations in NanoVG are modeled after HTML Canvas API, and +// the blend func is based on OpenGL (see corresponding manuals for more info). +// The colors in the blending state have premultiplied alpha. + +// Sets the composite operation. The op parameter should be one of NVGcompositeOperation. +void nvgGlobalCompositeOperation(NVGcontext * ctx, int op); + +// Sets the composite operation with custom pixel arithmetic. The parameters should be one of NVGblendFactor. +void nvgGlobalCompositeBlendFunc(NVGcontext * ctx, int sfactor, int dfactor); + +// Sets the composite operation with custom pixel arithmetic for RGB and alpha components separately. The parameters should be one of NVGblendFactor. +void nvgGlobalCompositeBlendFuncSeparate(NVGcontext * ctx, int srcRGB, int dstRGB, int srcAlpha, int dstAlpha); + +// +// Color utils +// +// Colors in NanoVG are stored as unsigned ints in ABGR format. + +// Returns a color value from red, green, blue values. Alpha will be set to 255 (1.0f). 
+NVGcolor nvgRGB(unsigned char r, unsigned char g, unsigned char b); + +// Returns a color value from red, green, blue values. Alpha will be set to 1.0f. +NVGcolor nvgRGBf(float r, float g, float b); + + +// Returns a color value from red, green, blue and alpha values. +NVGcolor nvgRGBA(unsigned char r, unsigned char g, unsigned char b, unsigned char a); + +// Returns a color value from red, green, blue and alpha values. +NVGcolor nvgRGBAf(float r, float g, float b, float a); + + +// Linearly interpolates from color c0 to c1, and returns resulting color value. +NVGcolor nvgLerpRGBA(NVGcolor c0, NVGcolor c1, float u); + +// Sets transparency of a color value. +NVGcolor nvgTransRGBA(NVGcolor c0, unsigned char a); + +// Sets transparency of a color value. +NVGcolor nvgTransRGBAf(NVGcolor c0, float a); + +// Returns color value specified by hue, saturation and lightness. +// HSL values are all in range [0..1], alpha will be set to 255. +NVGcolor nvgHSL(float h, float s, float l); + +// Returns color value specified by hue, saturation and lightness and alpha. +// HSL values are all in range [0..1], alpha in range [0..255] +NVGcolor nvgHSLA(float h, float s, float l, unsigned char a); + +// +// State Handling +// +// NanoVG contains state which represents how paths will be rendered. +// The state contains transform, fill and stroke styles, text and font styles, +// and scissor clipping. + +// Pushes and saves the current render state into a state stack. +// A matching nvgRestore() must be used to restore the state. +void nvgSave(NVGcontext * ctx); + +// Pops and restores current render state. +void nvgRestore(NVGcontext * ctx); + +// Resets current render state to default values. Does not affect the render state stack. +void nvgReset(NVGcontext * ctx); + +// +// Render styles +// +// Fill and stroke render style can be either a solid color or a paint which is a gradient or a pattern. 
+// Solid color is simply defined as a color value, different kinds of paints can be created
+// using nvgLinearGradient(), nvgBoxGradient(), nvgRadialGradient() and nvgImagePattern().
+//
+// Current render style can be saved and restored using nvgSave() and nvgRestore().
+
+// Sets whether to draw antialias for nvgStroke() and nvgFill(). It's enabled by default.
+void nvgShapeAntiAlias(NVGcontext * ctx, int enabled);
+
+// Sets current stroke style to a solid color.
+void nvgStrokeColor(NVGcontext * ctx, NVGcolor color);
+
+// Sets current stroke style to a paint, which can be one of the gradients or a pattern.
+void nvgStrokePaint(NVGcontext * ctx, NVGpaint paint);
+
+// Sets current fill style to a solid color.
+void nvgFillColor(NVGcontext * ctx, NVGcolor color);
+
+// Sets current fill style to a paint, which can be one of the gradients or a pattern.
+void nvgFillPaint(NVGcontext * ctx, NVGpaint paint);
+
+// Sets the miter limit of the stroke style.
+// Miter limit controls when a sharp corner is beveled.
+void nvgMiterLimit(NVGcontext * ctx, float limit);
+
+// Sets the stroke width of the stroke style.
+void nvgStrokeWidth(NVGcontext * ctx, float size);
+
+// Sets how the end of the line (cap) is drawn.
+// Can be one of: NVG_BUTT (default), NVG_ROUND, NVG_SQUARE.
+void nvgLineCap(NVGcontext * ctx, int cap);
+
+// Sets how sharp path corners are drawn.
+// Can be one of NVG_MITER (default), NVG_ROUND, NVG_BEVEL.
+void nvgLineJoin(NVGcontext * ctx, int join);
+
+// Sets the transparency applied to all rendered shapes.
+// Already transparent paths will get proportionally more transparent as well.
+void nvgGlobalAlpha(NVGcontext * ctx, float alpha);
+
+//
+// Transforms
+//
+// The paths, gradients, patterns and scissor region are transformed by a transformation
+// matrix at the time when they are passed to the API.
+// The current transformation matrix is an affine matrix:
+// [sx kx tx]
+// [ky sy ty]
+// [ 0 0 1]
+// Where: sx,sy define scaling, kx,ky skewing, and tx,ty translation.
+// The last row is assumed to be 0,0,1 and is not stored.
+//
+// Apart from nvgResetTransform(), each transformation function first creates a
+// specific transformation matrix and pre-multiplies the current transformation by it.
+//
+// Current coordinate system (transformation) can be saved and restored using nvgSave() and nvgRestore().
+
+// Resets current transform to an identity matrix.
+void nvgResetTransform(NVGcontext * ctx);
+
+// Premultiplies current coordinate system by specified matrix.
+// The parameters are interpreted as matrix as follows:
+// [a c e]
+// [b d f]
+// [0 0 1]
+void nvgTransform(NVGcontext * ctx, float a, float b, float c, float d, float e, float f);
+
+// Translates current coordinate system.
+void nvgTranslate(NVGcontext * ctx, float x, float y);
+
+// Rotates current coordinate system. Angle is specified in radians.
+void nvgRotate(NVGcontext * ctx, float angle);
+
+// Skews the current coordinate system along X axis. Angle is specified in radians.
+void nvgSkewX(NVGcontext * ctx, float angle);
+
+// Skews the current coordinate system along Y axis. Angle is specified in radians.
+void nvgSkewY(NVGcontext * ctx, float angle);
+
+// Scales the current coordinate system.
+void nvgScale(NVGcontext * ctx, float x, float y);
+
+// Stores the top part (a-f) of the current transformation matrix into the specified buffer.
+// [a c e]
+// [b d f]
+// [0 0 1]
+// There should be space for 6 floats in the return buffer for the values a-f.
+void nvgCurrentTransform(NVGcontext * ctx, float * xform);
+
+
+// The following functions can be used to make calculations on 2x3 transformation matrices.
+// A 2x3 matrix is represented as float[6].
+
+// Sets the transform to identity matrix.
+void nvgTransformIdentity(float * dst); + +// Sets the transform to translation matrix matrix. +void nvgTransformTranslate(float * dst, float tx, float ty); + +// Sets the transform to scale matrix. +void nvgTransformScale(float * dst, float sx, float sy); + +// Sets the transform to rotate matrix. Angle is specified in radians. +void nvgTransformRotate(float * dst, float a); + +// Sets the transform to skew-x matrix. Angle is specified in radians. +void nvgTransformSkewX(float * dst, float a); + +// Sets the transform to skew-y matrix. Angle is specified in radians. +void nvgTransformSkewY(float * dst, float a); + +// Sets the transform to the result of multiplication of two transforms, of A = A*B. +void nvgTransformMultiply(float * dst, const float * src); + +// Sets the transform to the result of multiplication of two transforms, of A = B*A. +void nvgTransformPremultiply(float * dst, const float * src); + +// Sets the destination to inverse of specified transform. +// Returns 1 if the inverse could be calculated, else 0. +int nvgTransformInverse(float * dst, const float * src); + +// Transform a point by given transform. +void nvgTransformPoint(float * dstx, float * dsty, const float * xform, float srcx, float srcy); + +// Converts degrees to radians and vice versa. +float nvgDegToRad(float deg); +float nvgRadToDeg(float rad); + +// +// Images +// +// NanoVG allows you to load jpg, png, psd, tga, pic and gif files to be used for rendering. +// In addition you can upload your own image. The image loading is provided by stb_image. +// The parameter imageFlags is combination of flags defined in NVGimageFlags. + +// Creates image from specified image data with custom format. +// format: see NVGtexture. +// Returns handle to the image. +int nvgCreateImage(NVGcontext * ctx, int w, int h, int imageFlags, int format, const unsigned char * data); + +// Updates image data specified by image handle. 
+void nvgUpdateImage(NVGcontext * ctx, int image, const unsigned char * data); + +// Returns the dimensions of a created image. +void nvgImageSize(NVGcontext * ctx, int image, int * w, int * h); + +// Deletes created image. +void nvgDeleteImage(NVGcontext * ctx, int image); + +// +// Paints +// +// NanoVG supports four types of paints: linear gradient, box gradient, radial gradient and image pattern. +// These can be used as paints for strokes and fills. + +// Creates and returns a linear gradient. Parameters (sx,sy)-(ex,ey) specify the start and end coordinates +// of the linear gradient, icol specifies the start color and ocol the end color. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). +NVGpaint nvgLinearGradient(NVGcontext * ctx, float sx, float sy, float ex, float ey, + NVGcolor icol, NVGcolor ocol); + +// Creates and returns a box gradient. Box gradient is a feathered rounded rectangle, it is useful for rendering +// drop shadows or highlights for boxes. Parameters (x,y) define the top-left corner of the rectangle, +// (w,h) define the size of the rectangle, r defines the corner radius, and f feather. Feather defines how blurry +// the border of the rectangle is. Parameter icol specifies the inner color and ocol the outer color of the gradient. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). +NVGpaint nvgBoxGradient(NVGcontext * ctx, float x, float y, float w, float h, + float r, float f, NVGcolor icol, NVGcolor ocol); + +// Creates and returns a radial gradient. Parameters (cx,cy) specify the center, inr and outr specify +// the inner and outer radius of the gradient, icol specifies the start color and ocol the end color. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). 
+NVGpaint nvgRadialGradient(NVGcontext * ctx, float cx, float cy, float inr, float outr, + NVGcolor icol, NVGcolor ocol); + +// Creates and returns an image pattern. Parameters (ox,oy) specify the left-top location of the image pattern, +// (ex,ey) the size of one image, angle rotation around the top-left corner, image is handle to the image to render. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). +NVGpaint nvgImagePattern(NVGcontext * ctx, float ox, float oy, float ex, float ey, + float angle, int image, float alpha); + +// +// Scissoring +// +// Scissoring allows you to clip the rendering into a rectangle. This is useful for various +// user interface cases like rendering a text edit or a timeline. + +// Sets the current scissor rectangle. +// The scissor rectangle is transformed by the current transform. +void nvgScissor(NVGcontext * ctx, float x, float y, float w, float h); + +// Intersects current scissor rectangle with the specified rectangle. +// The scissor rectangle is transformed by the current transform. +// Note: in case the rotation of previous scissor rect differs from +// the current one, the intersection will be done between the specified +// rectangle and the previous scissor rectangle transformed in the current +// transform space. The resulting shape is always rectangle. +void nvgIntersectScissor(NVGcontext * ctx, float x, float y, float w, float h); + +// Reset and disables scissoring. +void nvgResetScissor(NVGcontext * ctx); + +// +// Paths +// +// Drawing a new shape starts with nvgBeginPath(), it clears all the currently defined paths. +// Then you define one or more paths and sub-paths which describe the shape. The are functions +// to draw common shapes like rectangles and circles, and lower level step-by-step functions, +// which allow to define a path curve by curve. +// +// NanoVG uses even-odd fill rule to draw the shapes. 
Solid shapes should have counter clockwise
+// winding and holes should have clockwise order. To specify winding of a path you can
+// call nvgPathWinding(). This is useful especially for the common shapes, which are drawn CCW.
+//
+// Finally you can fill the path using current fill style by calling nvgFill(), and stroke it
+// with current stroke style by calling nvgStroke().
+//
+// The curve segments and sub-paths are transformed by the current transform.
+
+// Clears the current path and sub-paths.
+void nvgBeginPath(NVGcontext * ctx);
+
+// Starts new sub-path with specified point as first point.
+void nvgMoveTo(NVGcontext * ctx, float x, float y);
+
+// Adds line segment from the last point in the path to the specified point.
+void nvgLineTo(NVGcontext * ctx, float x, float y);
+
+// Adds cubic bezier segment from last point in the path via two control points to the specified point.
+void nvgBezierTo(NVGcontext * ctx, float c1x, float c1y, float c2x, float c2y, float x, float y);
+
+// Adds quadratic bezier segment from last point in the path via a control point to the specified point.
+void nvgQuadTo(NVGcontext * ctx, float cx, float cy, float x, float y);
+
+// Adds an arc segment at the corner defined by the last path point, and two specified points.
+void nvgArcTo(NVGcontext * ctx, float x1, float y1, float x2, float y2, float radius);
+
+// Closes current sub-path with a line segment.
+void nvgClosePath(NVGcontext * ctx);
+
+// Sets the current sub-path winding, see NVGwinding and NVGsolidity.
+void nvgPathWinding(NVGcontext * ctx, int dir);
+
+// Creates new circle arc shaped sub-path. The arc center is at cx,cy, the arc radius is r,
+// and the arc is drawn from angle a0 to a1, and swept in direction dir (NVG_CCW, or NVG_CW).
+// Angles are specified in radians.
+void nvgArc(NVGcontext * ctx, float cx, float cy, float r, float a0, float a1, int dir);
+
+// Creates new rectangle shaped sub-path.
+void nvgRect(NVGcontext * ctx, float x, float y, float w, float h);
+
+// Creates new rounded rectangle shaped sub-path.
+void nvgRoundedRect(NVGcontext * ctx, float x, float y, float w, float h, float r);
+
+// Creates new rounded rectangle shaped sub-path with varying radii for each corner.
+void nvgRoundedRectVarying(NVGcontext * ctx, float x, float y, float w, float h, float radTopLeft, float radTopRight,
+ float radBottomRight, float radBottomLeft);
+
+// Creates new ellipse shaped sub-path.
+void nvgEllipse(NVGcontext * ctx, float cx, float cy, float rx, float ry);
+
+// Creates new circle shaped sub-path.
+void nvgCircle(NVGcontext * ctx, float cx, float cy, float r);
+
+// Fills the current path with current fill style.
+void nvgFill(NVGcontext * ctx);
+
+// Strokes the current path with current stroke style.
+void nvgStroke(NVGcontext * ctx);
+
+
+//
+// Text
+//
+// NanoVG allows you to load .ttf files and use the font to render text.
+//
+// The appearance of the text can be defined by setting the current text style
+// and by specifying the fill color. Common text and font settings such as
+// font size, letter spacing and text align are supported. Font blur allows you
+// to create simple text effects such as drop shadows.
+//
+// At render time the font face can be set based on the font handles or name.
+//
+// Font measure functions return values in local space, the calculations are
+// carried in the same resolution as the final rendering. This is done because
+// the text glyph positions are snapped to the nearest pixels for sharp rendering.
+//
+// The local space means that values are not rotated or scaled as per the current
+// transformation. For example if you set font size to 12, which would mean that
+// line height is 16, then regardless of the current scaling and rotation, the
+// returned line height is always 16. Some measures may vary because of the scaling
+// since aforementioned pixel snapping.
+// +// While this may sound a little odd, the setup allows you to always render the +// same way regardless of scaling. I.e. following works regardless of scaling: +// +// const char* txt = "Text me up."; +// nvgTextBounds(vg, x,y, txt, NULL, bounds); +// nvgBeginPath(vg); +// nvgRect(vg, bounds[0],bounds[1], bounds[2]-bounds[0], bounds[3]-bounds[1]); +// nvgFill(vg); +// +// Note: currently only solid color fill is supported for text. + +// Creates font by loading it from the disk from specified file name. +// Returns handle to the font. +int nvgCreateFont(NVGcontext * ctx, const char * name, const char * filename); + +// fontIndex specifies which font face to load from a .ttf/.ttc file. +int nvgCreateFontAtIndex(NVGcontext * ctx, const char * name, const char * filename, const int fontIndex); + +// Creates font by loading it from the specified memory chunk. +// Returns handle to the font. +int nvgCreateFontMem(NVGcontext * ctx, const char * name, unsigned char * data, int ndata, int freeData); + +// fontIndex specifies which font face to load from a .ttf/.ttc file. +int nvgCreateFontMemAtIndex(NVGcontext * ctx, const char * name, unsigned char * data, int ndata, int freeData, + const int fontIndex); + +// Finds a loaded font of specified name, and returns handle to it, or -1 if the font is not found. +int nvgFindFont(NVGcontext * ctx, const char * name); + +// Adds a fallback font by handle. +int nvgAddFallbackFontId(NVGcontext * ctx, int baseFont, int fallbackFont); + +// Adds a fallback font by name. +int nvgAddFallbackFont(NVGcontext * ctx, const char * baseFont, const char * fallbackFont); + +// Resets fallback fonts by handle. +void nvgResetFallbackFontsId(NVGcontext * ctx, int baseFont); + +// Resets fallback fonts by name. +void nvgResetFallbackFonts(NVGcontext * ctx, const char * baseFont); + +// Sets the font size of current text style. +void nvgFontSize(NVGcontext * ctx, float size); + +// Sets the blur of current text style. 
+void nvgFontBlur(NVGcontext * ctx, float blur); + +// Sets the letter spacing of current text style. +void nvgTextLetterSpacing(NVGcontext * ctx, float spacing); + +// Sets the proportional line height of current text style. The line height is specified as multiple of font size. +void nvgTextLineHeight(NVGcontext * ctx, float lineHeight); + +// Sets the text align of current text style, see NVGalign for options. +void nvgTextAlign(NVGcontext * ctx, int align); + +// Sets the font face based on specified id of current text style. +void nvgFontFaceId(NVGcontext * ctx, int font); + +// Sets the font face based on specified name of current text style. +void nvgFontFace(NVGcontext * ctx, const char * font); + +// Draws text string at specified location. If end is specified only the sub-string up to the end is drawn. +float nvgText(NVGcontext * ctx, float x, float y, const char * string, const char * end); + +// Draws multi-line text string at specified location wrapped at the specified width. If end is specified only the sub-string up to the end is drawn. +// White space is stripped at the beginning of the rows, the text is split at word boundaries or when new-line characters are encountered. +// Words longer than the max width are slit at nearest character (i.e. no hyphenation). +void nvgTextBox(NVGcontext * ctx, float x, float y, float breakRowWidth, const char * string, const char * end); + +// Measures the specified text string. Parameter bounds should be a pointer to float[4], +// if the bounding box of the text should be returned. The bounds value are [xmin,ymin, xmax,ymax] +// Returns the horizontal advance of the measured text (i.e. where the next character should drawn). +// Measured values are returned in local coordinate space. +float nvgTextBounds(NVGcontext * ctx, float x, float y, const char * string, const char * end, float * bounds); + +// Measures the specified multi-text string. 
Parameter bounds should be a pointer to float[4],
+// if the bounding box of the text should be returned. The bounds values are [xmin,ymin, xmax,ymax]
+// Measured values are returned in local coordinate space.
+void nvgTextBoxBounds(NVGcontext * ctx, float x, float y, float breakRowWidth, const char * string, const char * end,
+ float * bounds);
+
+// Calculates the glyph x positions of the specified text. If end is specified only the sub-string will be used.
+// Measured values are returned in local coordinate space.
+int nvgTextGlyphPositions(NVGcontext * ctx, float x, float y, const char * string, const char * end,
+ NVGglyphPosition * positions, int maxPositions);
+
+// Returns the vertical metrics based on the current text style.
+// Measured values are returned in local coordinate space.
+void nvgTextMetrics(NVGcontext * ctx, float * ascender, float * descender, float * lineh);
+
+// Breaks the specified text into lines. If end is specified only the sub-string will be used.
+// White space is stripped at the beginning of the rows, the text is split at word boundaries or when new-line characters are encountered.
+// Words longer than the max width are split at nearest character (i.e. no hyphenation).
+int nvgTextBreakLines(NVGcontext * ctx, const char * string, const char * end, float breakRowWidth, NVGtextRow * rows, + int maxRows); + +// +// Internal Render API +// +enum NVGtexture { + NVG_TEXTURE_ALPHA = 0x01, + NVG_TEXTURE_BGRA = 0x02, /* ARGB8888 format (memory order: B-G-R-A) */ + NVG_TEXTURE_RGBA = 0x03, /* Standard OpenGL RGBA format */ + NVG_TEXTURE_BGR = 0x04, /* RGB888 format (memory order: B-G-R) */ + NVG_TEXTURE_RGB565 = 0x05, /* RGB565 format */ + NVG_TEXTURE_BGRX = 0x06, /* XRGB8888 format (memory order: B-G-R-X, X ignored) */ +}; + +struct NVGscissor { + float xform[6]; + float extent[2]; +}; +typedef struct NVGscissor NVGscissor; + +struct NVGvertex { + float x, y, u, v; +}; +typedef struct NVGvertex NVGvertex; + +struct NVGpath { + int first; + int count; + unsigned char closed; + int nbevel; + NVGvertex * fill; + int nfill; + NVGvertex * stroke; + int nstroke; + int winding; + int convex; +}; +typedef struct NVGpath NVGpath; + +struct NVGparams { + void * userPtr; + int edgeAntiAlias; + int (*renderCreate)(void * uptr); + int (*renderCreateTexture)(void * uptr, int type, int w, int h, int imageFlags, const unsigned char * data); + int (*renderDeleteTexture)(void * uptr, int image); + int (*renderUpdateTexture)(void * uptr, int image, int x, int y, int w, int h, const unsigned char * data); + int (*renderGetTextureSize)(void * uptr, int image, int * w, int * h); + void (*renderViewport)(void * uptr, float width, float height, float devicePixelRatio); + void (*renderCancel)(void * uptr); + void (*renderFlush)(void * uptr); + void (*renderFill)(void * uptr, NVGpaint * paint, NVGcompositeOperationState compositeOperation, NVGscissor * scissor, + float fringe, const float * bounds, const NVGpath * paths, int npaths); + void (*renderStroke)(void * uptr, NVGpaint * paint, NVGcompositeOperationState compositeOperation, NVGscissor * scissor, + float fringe, float strokeWidth, const NVGpath * paths, int npaths); + void (*renderTriangles)(void * uptr, 
NVGpaint * paint, NVGcompositeOperationState compositeOperation, + NVGscissor * scissor, const NVGvertex * verts, int nverts, float fringe); + void (*renderDelete)(void * uptr); +}; +typedef struct NVGparams NVGparams; + +// Constructor and destructor, called by the render back-end. +NVGcontext * nvgCreateInternal(NVGparams * params); +void nvgDeleteInternal(NVGcontext * ctx); + +NVGparams * nvgInternalParams(NVGcontext * ctx); + +// Debug function to dump cached path data. +void nvgDebugDumpPathCache(NVGcontext * ctx); + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +#define NVG_NOTUSED(v) for (;;) { (void)(1 ? (void)0 : ( (void)(v) ) ); break; } + +#endif // LV_USE_NANOVG + +#ifdef __cplusplus +} +#endif + +#endif // NANOVG_H diff --git a/src/libs/nanovg/nanovg_gl.h b/src/libs/nanovg/nanovg_gl.h new file mode 100644 index 0000000000..0796c93461 --- /dev/null +++ b/src/libs/nanovg/nanovg_gl.h @@ -0,0 +1,1842 @@ +// +// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
+// +#ifndef NANOVG_GL_H +#define NANOVG_GL_H + +#include "../../lv_conf_internal.h" + +#if LV_USE_NANOVG + +#ifdef __cplusplus +extern "C" { +#endif + +// Create flags + +enum NVGcreateFlags { + // Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA). + NVG_ANTIALIAS = 1 << 0, + // Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little + // slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once. + NVG_STENCIL_STROKES = 1 << 1, + // Flag indicating that additional debug checks are done. + NVG_DEBUG = 1 << 2, +}; + +#if defined NANOVG_GL2_IMPLEMENTATION +# define NANOVG_GL2 1 +# define NANOVG_GL_IMPLEMENTATION 1 +# define NANOVG_GL_USE_UNIFORMBUFFER 0 +#elif defined NANOVG_GL3_IMPLEMENTATION +# define NANOVG_GL3 1 +# define NANOVG_GL_IMPLEMENTATION 1 +# define NANOVG_GL_USE_UNIFORMBUFFER 1 +#elif defined NANOVG_GLES2_IMPLEMENTATION +# define NANOVG_GLES2 1 +# define NANOVG_GL_IMPLEMENTATION 1 +# define NANOVG_GL_USE_UNIFORMBUFFER 0 +#elif defined NANOVG_GLES3_IMPLEMENTATION +# define NANOVG_GLES3 1 +# define NANOVG_GL_IMPLEMENTATION 1 +# define NANOVG_GL_USE_UNIFORMBUFFER 0 +#endif + +#define NANOVG_GL_USE_STATE_FILTER (1) + +// Creates NanoVG contexts for different OpenGL (ES) versions. +// Flags should be combination of the create flags above. 
+ +#if defined NANOVG_GL2 + +NVGcontext * nvgCreateGL2(int flags); +void nvgDeleteGL2(NVGcontext * ctx); + +int nvglCreateImageFromHandleGL2(NVGcontext * ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGL2(NVGcontext * ctx, int image); + +#endif + +#if defined NANOVG_GL3 + +NVGcontext * nvgCreateGL3(int flags); +void nvgDeleteGL3(NVGcontext * ctx); + +int nvglCreateImageFromHandleGL3(NVGcontext * ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGL3(NVGcontext * ctx, int image); + +#endif + +#if defined NANOVG_GLES2 + +NVGcontext * nvgCreateGLES2(int flags); +void nvgDeleteGLES2(NVGcontext * ctx); + +int nvglCreateImageFromHandleGLES2(NVGcontext * ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGLES2(NVGcontext * ctx, int image); + +#endif + +#if defined NANOVG_GLES3 + +NVGcontext * nvgCreateGLES3(int flags); +void nvgDeleteGLES3(NVGcontext * ctx); + +int nvglCreateImageFromHandleGLES3(NVGcontext * ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGLES3(NVGcontext * ctx, int image); + +#endif + +// These are additional flags on top of NVGimageFlags. +enum NVGimageFlagsGL { + NVG_IMAGE_NODELETE = 1 << 16, // Do not delete GL texture handle. 
+}; + +#ifdef __cplusplus +} +#endif + +#endif /* NANOVG_GL_H */ + +#ifdef NANOVG_GL_IMPLEMENTATION + +#include +#include "nanovg.h" +#include "../../stdlib/lv_mem.h" +#include "../../stdlib/lv_string.h" +#include "../../stdlib/lv_sprintf.h" +#include "../../misc/lv_log.h" +#include "../../misc/lv_profiler.h" + +enum GLNVGuniformLoc { + GLNVG_LOC_VIEWSIZE, + GLNVG_LOC_TEX, + GLNVG_LOC_FRAG, + GLNVG_MAX_LOCS +}; + +enum GLNVGshaderType { + NSVG_SHADER_FILLGRAD, + NSVG_SHADER_FILLIMG, + NSVG_SHADER_SIMPLE, + NSVG_SHADER_IMG +}; + +#define GLNVG_SHADER_COUNT 4 + +#if NANOVG_GL_USE_UNIFORMBUFFER +enum GLNVGuniformBindings { + GLNVG_FRAG_BINDING = 0, +}; +#endif + +struct GLNVGshader { + GLuint prog; + GLuint frag; + GLuint vert; + GLint loc[GLNVG_MAX_LOCS]; +}; +typedef struct GLNVGshader GLNVGshader; + +struct GLNVGtexture { + int id; + GLuint tex; + int width, height; + int type; + int flags; +}; +typedef struct GLNVGtexture GLNVGtexture; + +struct GLNVGblend { + GLenum srcRGB; + GLenum dstRGB; + GLenum srcAlpha; + GLenum dstAlpha; +}; +typedef struct GLNVGblend GLNVGblend; + +enum GLNVGcallType { + GLNVG_NONE = 0, + GLNVG_FILL, + GLNVG_CONVEXFILL, + GLNVG_STROKE, + GLNVG_TRIANGLES, +}; + +struct GLNVGcall { + int type; + int image; + int pathOffset; + int pathCount; + int triangleOffset; + int triangleCount; + int uniformOffset; + int shaderType; + GLNVGblend blendFunc; +}; +typedef struct GLNVGcall GLNVGcall; + +struct GLNVGpath { + int fillOffset; + int fillCount; + int strokeOffset; + int strokeCount; +}; +typedef struct GLNVGpath GLNVGpath; + +#if NANOVG_GL_USE_UNIFORMBUFFER +struct GLNVGfragUniforms { + struct { + float scissorMat[12]; // matrices are actually 3 vec4s + float paintMat[12]; + union NVGcolor innerCol; + union NVGcolor outerCol; + float scissorExt[2]; + float scissorScale[2]; + float extent[2]; + float radius; + float feather; + float strokeMult; + float strokeThr; + int texType; + int type; + } s; +}; +typedef struct GLNVGfragUniforms 
GLNVGfragUniforms; +#else +// note: after modifying layout or size of uniform array, +// don't forget to also update the fragment shader source! +#define NANOVG_GL_UNIFORMARRAY_SIZE 11 +union GLNVGfragUniforms { + struct { + float scissorMat[12]; // matrices are actually 3 vec4s + float paintMat[12]; + union NVGcolor innerCol; + union NVGcolor outerCol; + float scissorExt[2]; + float scissorScale[2]; + float extent[2]; + float radius; + float feather; + float strokeMult; + float strokeThr; + float texType; + float type; + } s; + float uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4]; +}; +typedef union GLNVGfragUniforms GLNVGfragUniforms; +#endif + +struct GLNVGcontext { + GLNVGshader shaders[GLNVG_SHADER_COUNT]; + GLNVGtexture * textures; + float view[2]; + int ntextures; + int ctextures; + int textureId; + GLuint vertBuf[2]; + int vertBufIndex; +#if defined NANOVG_GL3 + GLuint vertArr; +#endif +#if NANOVG_GL_USE_UNIFORMBUFFER + GLuint fragBuf; +#endif + int fragSize; + int flags; + int boundShader; + + // Per frame buffers + GLNVGcall * calls; + int ccalls; + int ncalls; + GLNVGpath * paths; + int cpaths; + int npaths; + struct NVGvertex * verts; + int cverts; + int nverts; + unsigned char * uniforms; + int cuniforms; + int nuniforms; + + // cached state +#if NANOVG_GL_USE_STATE_FILTER + GLuint boundTexture; + GLuint stencilMask; + GLenum stencilFunc; + GLint stencilFuncRef; + GLuint stencilFuncMask; + GLNVGblend blendFunc; +#endif + + int dummyTex; +}; +typedef struct GLNVGcontext GLNVGcontext; + +static int glnvg__maxi(int a, int b) +{ + return a > b ? a : b; +} + +#ifdef NANOVG_GLES2 +static unsigned int glnvg__nearestPow2(unsigned int num) +{ + unsigned n = num > 0 ? 
num - 1 : 0; + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; + n++; + return n; +} +#endif + +static void glnvg__bindTexture(GLNVGcontext * gl, GLuint tex) +{ +#if NANOVG_GL_USE_STATE_FILTER + if(gl->boundTexture != tex) { + gl->boundTexture = tex; + glBindTexture(GL_TEXTURE_2D, tex); + } +#else + glBindTexture(GL_TEXTURE_2D, tex); +#endif +} + +static void glnvg__stencilMask(GLNVGcontext * gl, GLuint mask) +{ +#if NANOVG_GL_USE_STATE_FILTER + if(gl->stencilMask != mask) { + gl->stencilMask = mask; + glStencilMask(mask); + } +#else + glStencilMask(mask); +#endif +} + +static void glnvg__stencilFunc(GLNVGcontext * gl, GLenum func, GLint ref, GLuint mask) +{ +#if NANOVG_GL_USE_STATE_FILTER + if((gl->stencilFunc != func) || + (gl->stencilFuncRef != ref) || + (gl->stencilFuncMask != mask)) { + + gl->stencilFunc = func; + gl->stencilFuncRef = ref; + gl->stencilFuncMask = mask; + glStencilFunc(func, ref, mask); + } +#else + glStencilFunc(func, ref, mask); +#endif +} +static void glnvg__blendFuncSeparate(GLNVGcontext * gl, const GLNVGblend * blend) +{ +#if NANOVG_GL_USE_STATE_FILTER + if((gl->blendFunc.srcRGB != blend->srcRGB) || + (gl->blendFunc.dstRGB != blend->dstRGB) || + (gl->blendFunc.srcAlpha != blend->srcAlpha) || + (gl->blendFunc.dstAlpha != blend->dstAlpha)) { + + gl->blendFunc = *blend; + glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha, blend->dstAlpha); + } +#else + glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha, blend->dstAlpha); +#endif +} + +static GLNVGtexture * glnvg__allocTexture(GLNVGcontext * gl) +{ + GLNVGtexture * tex = NULL; + int i; + + for(i = 0; i < gl->ntextures; i++) { + if(gl->textures[i].id == 0) { + tex = &gl->textures[i]; + break; + } + } + if(tex == NULL) { + if(gl->ntextures + 1 > gl->ctextures) { + GLNVGtexture * textures; + int ctextures = glnvg__maxi(gl->ntextures + 1, 4) + gl->ctextures / 2; // 1.5x Overallocate + textures = (GLNVGtexture *)lv_realloc(gl->textures, 
sizeof(GLNVGtexture) * ctextures); + if(textures == NULL) return NULL; + gl->textures = textures; + gl->ctextures = ctextures; + } + tex = &gl->textures[gl->ntextures++]; + } + + lv_memzero(tex, sizeof(*tex)); + tex->id = ++gl->textureId; + + return tex; +} + +static GLNVGtexture * glnvg__findTexture(GLNVGcontext * gl, int id) +{ + int i; + for(i = 0; i < gl->ntextures; i++) + if(gl->textures[i].id == id) + return &gl->textures[i]; + return NULL; +} + +static int glnvg__deleteTexture(GLNVGcontext * gl, int id) +{ + int i; + for(i = 0; i < gl->ntextures; i++) { + if(gl->textures[i].id == id) { + if(gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0) + glDeleteTextures(1, &gl->textures[i].tex); + lv_memzero(&gl->textures[i], sizeof(gl->textures[i])); + return 1; + } + } + return 0; +} + +static void glnvg__dumpShaderError(GLuint shader, const char * name, const char * type) +{ + GLchar str[512 + 1]; + GLsizei len = 0; + glGetShaderInfoLog(shader, 512, &len, str); + if(len > 512) len = 512; + str[len] = '\0'; + LV_LOG_ERROR("Shader %s/%s error:\n%s", name, type, str); +} + +static void glnvg__dumpProgramError(GLuint prog, const char * name) +{ + GLchar str[512 + 1]; + GLsizei len = 0; + glGetProgramInfoLog(prog, 512, &len, str); + if(len > 512) len = 512; + str[len] = '\0'; + LV_LOG_ERROR("Program %s error:\n%s", name, str); +} + +static void glnvg__checkError(GLNVGcontext * gl, const char * str) +{ + GLenum err; + if((gl->flags & NVG_DEBUG) == 0) return; + err = glGetError(); + if(err != GL_NO_ERROR) { + LV_LOG_ERROR("Error %08x after %s", err, str); + return; + } +} + +static int glnvg__createShader(GLNVGshader * shader, const char * name, const char * header, const char * opts, + const char * vshader, const char * fshader) +{ + GLint status; + GLuint prog, vert, frag; + const char * str[3]; + str[0] = header; + str[1] = opts != NULL ? 
opts : ""; + + lv_memzero(shader, sizeof(*shader)); + + prog = glCreateProgram(); + vert = glCreateShader(GL_VERTEX_SHADER); + frag = glCreateShader(GL_FRAGMENT_SHADER); + str[2] = vshader; + glShaderSource(vert, 3, str, 0); + str[2] = fshader; + glShaderSource(frag, 3, str, 0); + + glCompileShader(vert); + glGetShaderiv(vert, GL_COMPILE_STATUS, &status); + if(status != GL_TRUE) { + glnvg__dumpShaderError(vert, name, "vert"); + return 0; + } + + glCompileShader(frag); + glGetShaderiv(frag, GL_COMPILE_STATUS, &status); + if(status != GL_TRUE) { + glnvg__dumpShaderError(frag, name, "frag"); + return 0; + } + + glAttachShader(prog, vert); + glAttachShader(prog, frag); + + glBindAttribLocation(prog, 0, "vertex"); + glBindAttribLocation(prog, 1, "tcoord"); + + glLinkProgram(prog); + glGetProgramiv(prog, GL_LINK_STATUS, &status); + if(status != GL_TRUE) { + glnvg__dumpProgramError(prog, name); + return 0; + } + + shader->prog = prog; + shader->vert = vert; + shader->frag = frag; + + return 1; +} + +static void glnvg__deleteShader(GLNVGshader * shader) +{ + if(shader->prog != 0) + glDeleteProgram(shader->prog); + if(shader->vert != 0) + glDeleteShader(shader->vert); + if(shader->frag != 0) + glDeleteShader(shader->frag); +} + +static void glnvg__getUniforms(GLNVGshader * shader) +{ + shader->loc[GLNVG_LOC_VIEWSIZE] = glGetUniformLocation(shader->prog, "viewSize"); + shader->loc[GLNVG_LOC_TEX] = glGetUniformLocation(shader->prog, "tex"); + +#if NANOVG_GL_USE_UNIFORMBUFFER + shader->loc[GLNVG_LOC_FRAG] = glGetUniformBlockIndex(shader->prog, "frag"); +#else + shader->loc[GLNVG_LOC_FRAG] = glGetUniformLocation(shader->prog, "frag"); +#endif +} + +static int glnvg__renderCreateTexture(void * uptr, int type, int w, int h, int imageFlags, const unsigned char * data); + +static int glnvg__renderCreate(void * uptr) +{ + GLNVGcontext * gl = (GLNVGcontext *)uptr; + int align = 4; + + // TODO: mediump float may not be enough for GLES2 in iOS. 
+ // see the following discussion: https://github.com/memononen/nanovg/issues/46 + static const char * shaderHeader = +#if defined NANOVG_GL2 + "#define NANOVG_GL2 1\n" +#elif defined NANOVG_GL3 + "#version 150 core\n" + "#define NANOVG_GL3 1\n" +#elif defined NANOVG_GLES2 + "#version 100\n" + "#define NANOVG_GL2 1\n" +#elif defined NANOVG_GLES3 + "#version 300 es\n" + "#define NANOVG_GL3 1\n" +#endif + +#if NANOVG_GL_USE_UNIFORMBUFFER + "#define USE_UNIFORMBUFFER 1\n" +#else + "#define UNIFORMARRAY_SIZE 11\n" +#endif + "\n"; + + static const char * fillVertShader = + "#ifdef GL_ES\n" + "#if defined(NANOVG_GL3)\n" + " precision highp float;\n" + "#else\n" + " precision mediump float;\n" + "#endif\n" + "#endif\n" + "#ifdef NANOVG_GL3\n" + " uniform vec2 viewSize;\n" + " in vec2 vertex;\n" + " in vec2 tcoord;\n" + " out vec2 ftcoord;\n" + " out vec2 fpos;\n" + " out vec2 v_scissorPos;\n" + " out vec2 v_paintPos;\n" + "#else\n" + " uniform vec2 viewSize;\n" + " attribute vec2 vertex;\n" + " attribute vec2 tcoord;\n" + " varying vec2 ftcoord;\n" + " varying vec2 fpos;\n" + " varying vec2 v_scissorPos;\n" + " varying vec2 v_paintPos;\n" + "#endif\n" + "#ifdef NANOVG_GL3\n" + "#ifdef USE_UNIFORMBUFFER\n" + " layout(std140) uniform frag {\n" + " mat3 scissorMat;\n" + " mat3 paintMat;\n" + " vec4 innerCol;\n" + " vec4 outerCol;\n" + " vec2 scissorExt;\n" + " vec2 scissorScale;\n" + " vec2 extent;\n" + " float radius;\n" + " float feather;\n" + " float strokeMult;\n" + " float strokeThr;\n" + " int texType;\n" + " int type;\n" + " };\n" + "#else\n" + " uniform vec4 frag[UNIFORMARRAY_SIZE];\n" + "#endif\n" + "#else\n" + " uniform vec4 frag[UNIFORMARRAY_SIZE];\n" + "#endif\n" + "#ifndef USE_UNIFORMBUFFER\n" + " #define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n" + " #define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n" + "#endif\n" + "void main(void) {\n" + " ftcoord = tcoord;\n" + " fpos = vertex;\n" + " gl_Position = vec4(2.0*vertex.x/viewSize.x - 
1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);\n" + " #if SHADER_TYPE != 2\n" // Not SIMPLE + " v_scissorPos = (scissorMat * vec3(vertex, 1.0)).xy;\n" + " #endif\n" + " #if SHADER_TYPE == 0 || SHADER_TYPE == 1\n" // FILLGRAD or FILLIMG + " v_paintPos = (paintMat * vec3(vertex, 1.0)).xy;\n" + " #endif\n" + "}\n"; + + static const char * fillFragShader = + "#ifdef GL_ES\n" + "#if defined(NANOVG_GL3)\n" + " precision highp float;\n" + "#else\n" + " precision mediump float;\n" + "#endif\n" + "#endif\n" + "#ifdef NANOVG_GL3\n" + "#ifdef USE_UNIFORMBUFFER\n" + " layout(std140) uniform frag {\n" + " mat3 scissorMat;\n" + " mat3 paintMat;\n" + " vec4 innerCol;\n" + " vec4 outerCol;\n" + " vec2 scissorExt;\n" + " vec2 scissorScale;\n" + " vec2 extent;\n" + " float radius;\n" + " float feather;\n" + " float strokeMult;\n" + " float strokeThr;\n" + " int texType;\n" + " int type;\n" + " };\n" + "#else\n" // NANOVG_GL3 && !USE_UNIFORMBUFFER + " uniform vec4 frag[UNIFORMARRAY_SIZE];\n" + "#endif\n" + " uniform sampler2D tex;\n" + " in vec2 ftcoord;\n" + " in vec2 fpos;\n" + " in vec2 v_scissorPos;\n" + " in vec2 v_paintPos;\n" + " out vec4 outColor;\n" + "#else\n" // !NANOVG_GL3 + " uniform vec4 frag[UNIFORMARRAY_SIZE];\n" + " uniform sampler2D tex;\n" + " varying vec2 ftcoord;\n" + " varying vec2 fpos;\n" + " varying vec2 v_scissorPos;\n" + " varying vec2 v_paintPos;\n" + "#endif\n" + "#ifndef USE_UNIFORMBUFFER\n" + " #define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n" + " #define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n" + " #define innerCol frag[6]\n" + " #define outerCol frag[7]\n" + " #define scissorExt frag[8].xy\n" + " #define scissorScale frag[8].zw\n" + " #define extent frag[9].xy\n" + " #define radius frag[9].z\n" + " #define feather frag[9].w\n" + " #define strokeMult frag[10].x\n" + " #define strokeThr frag[10].y\n" + " #define texType int(frag[10].z)\n" + " #define type int(frag[10].w)\n" + "#endif\n" + "\n" + "float sdroundrect(vec2 pt, 
vec2 ext, float rad) {\n" + " vec2 ext2 = ext - vec2(rad,rad);\n" + " vec2 d = abs(pt) - ext2;\n" + " return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;\n" + "}\n" + "\n" + "// Scissoring\n" + "float scissorMask(vec2 p) {\n" + " vec2 sc = (abs(p) - scissorExt);\n" + " sc = vec2(0.5,0.5) - sc * scissorScale;\n" + " return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);\n" + "}\n" + "#ifdef EDGE_AA\n" + "// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.\n" + "float strokeMask() {\n" + " return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);\n" + "}\n" + "#endif\n" + "\n" + "void main(void) {\n" + " vec4 result;\n" + " #if SHADER_TYPE != 2\n" // Not SIMPLE + " float scissor = scissorMask(v_scissorPos);\n" + " #endif\n" + "#ifdef EDGE_AA\n" + " float strokeAlpha = strokeMask();\n" + " // if (strokeAlpha < strokeThr) discard;\n" + "#else\n" + " float strokeAlpha = 1.0;\n" + "#endif\n" + " #if SHADER_TYPE == 0\n" // Gradient + " // Calculate gradient color using box gradient\n" + " vec2 pt = v_paintPos;\n" + " float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);\n" + " vec4 color = mix(innerCol,outerCol,d);\n" + " // Combine alpha\n" + " color *= strokeAlpha * scissor;\n" + " result = color;\n" + " #elif SHADER_TYPE == 1\n" // Image + " // Calculate color fron texture\n" + " vec2 pt = v_paintPos / extent;\n" + "#ifdef NANOVG_GL3\n" + " vec4 color = texture(tex, pt);\n" + "#else\n" + " vec4 color = texture2D(tex, pt);\n" + "#endif\n" + " if (texType == 1) color = vec4(color.xyz*color.w,color.w);" + " else if (texType == 2) color = vec4(color.x);" + " else if (texType == 3) color.rgb = color.bgr;" // BGR -> RGB swizzle (premultiplied) + " else if (texType == 4) color = vec4(color.bgr, 1.0);" // BGRX -> RGB with alpha=1 + " else if (texType == 5) color = vec4(color.bgr*color.a, color.a);" // BGR swizzle + premultiply + " // Apply color tint and alpha.\n" + " color *= innerCol;\n" + " // Combine 
alpha\n" + " color *= strokeAlpha * scissor;\n" + " result = color;\n" + " #elif SHADER_TYPE == 2\n" // Stencil fill + " result = vec4(1,1,1,1);\n" + " #elif SHADER_TYPE == 3\n" // Textured tris + "#ifdef NANOVG_GL3\n" + " vec4 color = texture(tex, ftcoord);\n" + "#else\n" + " vec4 color = texture2D(tex, ftcoord);\n" + "#endif\n" + " if (texType == 1) color = vec4(color.xyz*color.w,color.w);" + " else if (texType == 2) color = vec4(color.x);" + " else if (texType == 3) color.rgb = color.bgr;" // BGR -> RGB swizzle (premultiplied) + " else if (texType == 4) color = vec4(color.bgr, 1.0);" // BGRX -> RGB with alpha=1 + " else if (texType == 5) color = vec4(color.bgr*color.a, color.a);" // BGR swizzle + premultiply + " color *= scissor;\n" + " result = color * innerCol;\n" + " #endif\n" + "#ifdef NANOVG_GL3\n" + " outColor = result;\n" + "#else\n" + " gl_FragColor = result;\n" + "#endif\n" + "}\n"; + + glnvg__checkError(gl, "init"); + + int i; + char opts[64]; + for(i = 0; i < GLNVG_SHADER_COUNT; i++) { + lv_snprintf(opts, sizeof(opts), "#define SHADER_TYPE %d\n%s", i, + (gl->flags & NVG_ANTIALIAS) ? "#define EDGE_AA 1\n" : ""); + if(glnvg__createShader(&gl->shaders[i], "shader", shaderHeader, opts, fillVertShader, fillFragShader) == 0) + return 0; + glnvg__checkError(gl, "uniform locations"); + glnvg__getUniforms(&gl->shaders[i]); + } + + // Create dynamic vertex array +#if defined NANOVG_GL3 + glGenVertexArrays(1, &gl->vertArr); +#endif + glGenBuffers(2, gl->vertBuf); + +#if NANOVG_GL_USE_UNIFORMBUFFER + // Create UBOs + glUniformBlockBinding(gl->shaders[0].prog, gl->shaders[0].loc[GLNVG_LOC_FRAG], GLNVG_FRAG_BINDING); + glGenBuffers(1, &gl->fragBuf); + glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align); +#endif + gl->fragSize = sizeof(GLNVGfragUniforms) + align - sizeof(GLNVGfragUniforms) % align; + + // Some platforms does not allow to have samples to unset textures. + // Create empty one which is bound when there's no texture specified. 
+ gl->dummyTex = glnvg__renderCreateTexture(gl, NVG_TEXTURE_ALPHA, 1, 1, 0, NULL); + + glnvg__checkError(gl, "create done"); + + glFinish(); + + return 1; +} + +static int glnvg__renderCreateTexture(void * uptr, int type, int w, int h, int imageFlags, const unsigned char * data) +{ + GLNVGcontext * gl = (GLNVGcontext *)uptr; + GLNVGtexture * tex = glnvg__allocTexture(gl); + + if(tex == NULL) return 0; + +#ifdef NANOVG_GLES2 + // Check for non-power of 2. + if(glnvg__nearestPow2(w) != (unsigned int)w || glnvg__nearestPow2(h) != (unsigned int)h) { + // No repeat + if((imageFlags & NVG_IMAGE_REPEATX) != 0 || (imageFlags & NVG_IMAGE_REPEATY) != 0) { + LV_LOG_WARN("Repeat X/Y is not supported for non power-of-two textures (%d x %d)", w, h); + imageFlags &= ~(NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY); + } + // No mips. + if(imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + LV_LOG_WARN("Mip-maps is not support for non power-of-two textures (%d x %d)", w, h); + imageFlags &= ~NVG_IMAGE_GENERATE_MIPMAPS; + } + } +#endif + + glGenTextures(1, &tex->tex); + tex->width = w; + tex->height = h; + tex->type = type; + tex->flags = imageFlags; + glnvg__bindTexture(gl, tex->tex); + + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); +#ifndef NANOVG_GLES2 + glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width); + glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0); + glPixelStorei(GL_UNPACK_SKIP_ROWS, 0); +#endif + +#if defined (NANOVG_GL2) + // GL 1.4 and later has support for generating mipmaps using a tex parameter. 
+ if(imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE); + } +#endif + + if(type == NVG_TEXTURE_BGRA || type == NVG_TEXTURE_BGRX) + /* BGRA/BGRX: upload as RGBA, shader will swizzle BGR->RGB */ + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data); + else if(type == NVG_TEXTURE_RGBA) + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data); + else if(type == NVG_TEXTURE_BGR) + /* BGR888: upload as RGB, shader will swizzle BGR->RGB */ + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, data); + else if(type == NVG_TEXTURE_RGB565) + /* RGB565: directly compatible with GL */ + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, data); + else +#if defined(NANOVG_GLES2) || defined (NANOVG_GL2) + glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data); +#elif defined(NANOVG_GLES3) + glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data); +#else + glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data); +#endif + + if(imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + if(imageFlags & NVG_IMAGE_NEAREST) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST); + } + else { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); + } + } + else { + if(imageFlags & NVG_IMAGE_NEAREST) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + } + else { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + } + } + + if(imageFlags & NVG_IMAGE_NEAREST) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + } + else { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + } + + if(imageFlags & NVG_IMAGE_REPEATX) + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); + else + glTexParameteri(GL_TEXTURE_2D, 
GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + + if(imageFlags & NVG_IMAGE_REPEATY) + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); + else + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + glPixelStorei(GL_UNPACK_ALIGNMENT, 4); +#ifndef NANOVG_GLES2 + glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); + glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0); + glPixelStorei(GL_UNPACK_SKIP_ROWS, 0); +#endif + + // The new way to build mipmaps on GLES and GL3 +#if !defined(NANOVG_GL2) + if(imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + glGenerateMipmap(GL_TEXTURE_2D); + } +#endif + + glnvg__checkError(gl, "create tex"); + glnvg__bindTexture(gl, 0); + + return tex->id; +} + + +static int glnvg__renderDeleteTexture(void * uptr, int image) +{ + GLNVGcontext * gl = (GLNVGcontext *)uptr; + return glnvg__deleteTexture(gl, image); +} + +static int glnvg__renderUpdateTexture(void * uptr, int image, int x, int y, int w, int h, const unsigned char * data) +{ + GLNVGcontext * gl = (GLNVGcontext *)uptr; + GLNVGtexture * tex = glnvg__findTexture(gl, image); + + if(tex == NULL) return 0; + glnvg__bindTexture(gl, tex->tex); + + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); + +#ifndef NANOVG_GLES2 + glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width); + glPixelStorei(GL_UNPACK_SKIP_PIXELS, x); + glPixelStorei(GL_UNPACK_SKIP_ROWS, y); +#else + // No support for all of skip, need to update a whole row at a time. 
    if(tex->type == NVG_TEXTURE_BGRA || tex->type == NVG_TEXTURE_RGBA || tex->type == NVG_TEXTURE_BGRX)
        data += y * tex->width * 4;    /* 4 bytes per pixel */
    else if(tex->type == NVG_TEXTURE_BGR)
        data += y * tex->width * 3;    /* 3 bytes per pixel */
    else if(tex->type == NVG_TEXTURE_RGB565)
        data += y * tex->width * 2;    /* 2 bytes per pixel */
    else
        data += y * tex->width;        /* 1 byte per pixel (alpha) */
    x = 0;
    w = tex->width;
#endif

    if(tex->type == NVG_TEXTURE_BGRA || tex->type == NVG_TEXTURE_BGRX)
        /* BGRA/BGRX: upload as RGBA, shader will swizzle BGR->RGB */
        glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, data);
    else if(tex->type == NVG_TEXTURE_RGBA)
        glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, data);
    else if(tex->type == NVG_TEXTURE_BGR)
        glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGB, GL_UNSIGNED_BYTE, data);
    else if(tex->type == NVG_TEXTURE_RGB565)
        glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, data);
    else
        /* single-channel: LUMINANCE on GL2/GLES2, RED elsewhere */
#if defined(NANOVG_GLES2) || defined(NANOVG_GL2)
        glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#else
        glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

    /* Restore the default unpack state so later uploads are unaffected. */
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
    glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

    glnvg__bindTexture(gl, 0);

    return 1;
}

/* Report the width/height of image `image`; returns 0 if the id is unknown. */
static int glnvg__renderGetTextureSize(void * uptr, int image, int * w, int * h)
{
    GLNVGcontext * gl = (GLNVGcontext *)uptr;
    GLNVGtexture * tex = glnvg__findTexture(gl, image);
    if(tex == NULL) return 0;
    *w = tex->width;
    *h = tex->height;
    return 1;
}

/* Expand a 2x3 NVG transform `t` into `m3`, a 3x4 matrix where each
 * 2-float column is padded to 4 floats (uniform-friendly mat3 layout). */
static void glnvg__xformToMat3x4(float * m3, float * t)
{
    m3[0] = t[0];
    m3[1] = t[1];
    m3[2] = 0.0f;
    m3[3] = 0.0f;
    m3[4] = t[2];
    m3[5] = t[3];
    m3[6] = 0.0f;
    m3[7] = 0.0f;
    m3[8] = t[4];
    m3[9] = t[5];
    m3[10] = 1.0f;
    m3[11] = 0.0f;
}

/* Multiply RGB by alpha, producing a premultiplied-alpha color. */
static NVGcolor glnvg__premulColor(NVGcolor c)
+{ + c.ch.r *= c.ch.a; + c.ch.g *= c.ch.a; + c.ch.b *= c.ch.a; + return c; +} + +static int glnvg__convertPaint(GLNVGcontext * gl, GLNVGfragUniforms * frag, NVGpaint * paint, + NVGscissor * scissor, float width, float fringe, float strokeThr) +{ + GLNVGtexture * tex = NULL; + float invxform[6]; + + lv_memzero(frag, sizeof(*frag)); + + frag->s.innerCol = glnvg__premulColor(paint->innerColor); + frag->s.outerCol = glnvg__premulColor(paint->outerColor); + + if(scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) { + lv_memzero(frag->s.scissorMat, sizeof(frag->s.scissorMat)); + frag->s.scissorExt[0] = 1.0f; + frag->s.scissorExt[1] = 1.0f; + frag->s.scissorScale[0] = 1.0f; + frag->s.scissorScale[1] = 1.0f; + } + else { + nvgTransformInverse(invxform, scissor->xform); + glnvg__xformToMat3x4(frag->s.scissorMat, invxform); + frag->s.scissorExt[0] = scissor->extent[0]; + frag->s.scissorExt[1] = scissor->extent[1]; + frag->s.scissorScale[0] = sqrtf(scissor->xform[0] * scissor->xform[0] + scissor->xform[2] * scissor->xform[2]) / fringe; + frag->s.scissorScale[1] = sqrtf(scissor->xform[1] * scissor->xform[1] + scissor->xform[3] * scissor->xform[3]) / fringe; + } + + lv_memcpy(frag->s.extent, paint->extent, sizeof(frag->s.extent)); + frag->s.strokeMult = (width * 0.5f + fringe * 0.5f) / fringe; + frag->s.strokeThr = strokeThr; + + if(paint->image != 0) { + tex = glnvg__findTexture(gl, paint->image); + if(tex == NULL) return 0; + if((tex->flags & NVG_IMAGE_FLIPY) != 0) { + float m1[6], m2[6]; + nvgTransformTranslate(m1, 0.0f, frag->s.extent[1] * 0.5f); + nvgTransformMultiply(m1, paint->xform); + nvgTransformScale(m2, 1.0f, -1.0f); + nvgTransformMultiply(m2, m1); + nvgTransformTranslate(m1, 0.0f, -frag->s.extent[1] * 0.5f); + nvgTransformMultiply(m1, m2); + nvgTransformInverse(invxform, m1); + } + else { + nvgTransformInverse(invxform, paint->xform); + } + frag->s.type = NSVG_SHADER_FILLIMG; + +#if NANOVG_GL_USE_UNIFORMBUFFER + if(tex->type == NVG_TEXTURE_RGBA) + 
frag->s.texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0 : 1; + else if(tex->type == NVG_TEXTURE_BGRA) + frag->s.texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 3 : 5; // BGR swizzle, optionally premultiply + else if(tex->type == NVG_TEXTURE_BGR) + frag->s.texType = 3; // BGR -> RGB swizzle (no alpha channel) + else if(tex->type == NVG_TEXTURE_BGRX) + frag->s.texType = 4; // BGRX -> RGB with alpha=1 in shader + else if(tex->type == NVG_TEXTURE_RGB565) + frag->s.texType = 0; // RGB565 is directly compatible + else + frag->s.texType = 2; +#else + if(tex->type == NVG_TEXTURE_RGBA) + frag->s.texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0.0f : 1.0f; + else if(tex->type == NVG_TEXTURE_BGRA) + frag->s.texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 3.0f : 5.0f; // BGR swizzle, optionally premultiply + else if(tex->type == NVG_TEXTURE_BGR) + frag->s.texType = 3.0f; // BGR -> RGB swizzle (no alpha channel) + else if(tex->type == NVG_TEXTURE_BGRX) + frag->s.texType = 4.0f; // BGRX -> RGB with alpha=1 in shader + else if(tex->type == NVG_TEXTURE_RGB565) + frag->s.texType = 0.0f; // RGB565 is directly compatible + else + frag->s.texType = 2.0f; +#endif + // printf("frag->texType = %d\n", frag->texType); + } + else { + frag->s.type = NSVG_SHADER_FILLGRAD; + frag->s.radius = paint->radius; + frag->s.feather = paint->feather; + nvgTransformInverse(invxform, paint->xform); + } + + glnvg__xformToMat3x4(frag->s.paintMat, invxform); + + return 1; +} + +static GLNVGfragUniforms * nvg__fragUniformPtr(GLNVGcontext * gl, int i); + +static void glnvg__bindShader(GLNVGcontext * gl, int shaderType) +{ + if(gl->boundShader != shaderType) { + gl->boundShader = shaderType; + glUseProgram(gl->shaders[shaderType].prog); + glUniform1i(gl->shaders[shaderType].loc[GLNVG_LOC_TEX], 0); + glUniform2fv(gl->shaders[shaderType].loc[GLNVG_LOC_VIEWSIZE], 1, gl->view); + } +} + +static void glnvg__setUniforms(GLNVGcontext * gl, int uniformOffset, int image, int shaderType) +{ + GLNVGtexture 
* tex = NULL; + + glnvg__bindShader(gl, shaderType); + +#if NANOVG_GL_USE_UNIFORMBUFFER + glBindBufferRange(GL_UNIFORM_BUFFER, GLNVG_FRAG_BINDING, gl->fragBuf, uniformOffset, sizeof(GLNVGfragUniforms)); +#else + // Optimization: NSVG_SHADER_SIMPLE doesn't use any uniforms in the fragment shader, + // so we can skip uploading them. + if(shaderType != NSVG_SHADER_SIMPLE) { + GLNVGfragUniforms * frag = nvg__fragUniformPtr(gl, uniformOffset); + glUniform4fv(gl->shaders[shaderType].loc[GLNVG_LOC_FRAG], NANOVG_GL_UNIFORMARRAY_SIZE, &(frag->uniformArray[0][0])); + } +#endif + + if(image != 0) { + tex = glnvg__findTexture(gl, image); + } + // If no image is set, use empty texture + if(tex == NULL) { + tex = glnvg__findTexture(gl, gl->dummyTex); + } + glnvg__bindTexture(gl, tex != NULL ? tex->tex : 0); + glnvg__checkError(gl, "tex paint tex"); +} + +static void glnvg__renderViewport(void * uptr, float width, float height, float devicePixelRatio) +{ + NVG_NOTUSED(devicePixelRatio); + GLNVGcontext * gl = (GLNVGcontext *)uptr; + gl->view[0] = width; + gl->view[1] = height; +} + +static void glnvg__fill(GLNVGcontext * gl, GLNVGcall * call) +{ + LV_PROFILER_DRAW_BEGIN; + GLNVGpath * paths = &gl->paths[call->pathOffset]; + int i, npaths = call->pathCount; + + // Draw shapes + glEnable(GL_STENCIL_TEST); + glnvg__stencilMask(gl, 0xff); + glnvg__stencilFunc(gl, GL_ALWAYS, 0, 0xff); + glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); + + // set bindpoint for solid loc + glnvg__setUniforms(gl, call->uniformOffset, 0, NSVG_SHADER_SIMPLE); + glnvg__checkError(gl, "fill simple"); + + glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP); + glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP); + glDisable(GL_CULL_FACE); + for(i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount); + glEnable(GL_CULL_FACE); + + // Draw anti-aliased pixels + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + + glnvg__setUniforms(gl, 
call->uniformOffset + gl->fragSize, call->image, call->shaderType); + glnvg__checkError(gl, "fill fill"); + + if(gl->flags & NVG_ANTIALIAS) { + glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff); + glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); + // Draw fringes + for(i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + } + + // Draw fill + glnvg__stencilFunc(gl, GL_NOTEQUAL, 0x0, 0xff); + glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO); + glDrawArrays(GL_TRIANGLE_STRIP, call->triangleOffset, call->triangleCount); + + glDisable(GL_STENCIL_TEST); + LV_PROFILER_DRAW_END; +} + +static void glnvg__convexFill(GLNVGcontext * gl, GLNVGcall * call) +{ + LV_PROFILER_DRAW_BEGIN; + GLNVGpath * paths = &gl->paths[call->pathOffset]; + int i, npaths = call->pathCount; + + glnvg__setUniforms(gl, call->uniformOffset, call->image, call->shaderType); + glnvg__checkError(gl, "convex fill"); + + for(i = 0; i < npaths; i++) { + glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount); + // Draw fringes + if(paths[i].strokeCount > 0) { + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + } + } + LV_PROFILER_DRAW_END; +} + +static void glnvg__stroke(GLNVGcontext * gl, GLNVGcall * call) +{ + LV_PROFILER_DRAW_BEGIN; + GLNVGpath * paths = &gl->paths[call->pathOffset]; + int npaths = call->pathCount, i; + + if(gl->flags & NVG_STENCIL_STROKES) { + + glEnable(GL_STENCIL_TEST); + glnvg__stencilMask(gl, 0xff); + + // Fill the stroke base without overlap + glnvg__stencilFunc(gl, GL_EQUAL, 0x0, 0xff); + glStencilOp(GL_KEEP, GL_KEEP, GL_INCR); + glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image, NSVG_SHADER_SIMPLE); + glnvg__checkError(gl, "stroke fill 0"); + for(i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + + // Draw anti-aliased pixels. 
        glnvg__setUniforms(gl, call->uniformOffset, call->image, call->shaderType);
        glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
        glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
        for(i = 0; i < npaths; i++)
            glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);

        // Clear stencil buffer.
        glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
        glnvg__stencilFunc(gl, GL_ALWAYS, 0x0, 0xff);
        glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
        glnvg__checkError(gl, "stroke fill 1");
        for(i = 0; i < npaths; i++)
            glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
        glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

        glDisable(GL_STENCIL_TEST);

        // glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);

    }
    else {
        /* Plain (non-stenciled) strokes: one pass per path. */
        glnvg__setUniforms(gl, call->uniformOffset, call->image, call->shaderType);
        glnvg__checkError(gl, "stroke fill");
        // Draw Strokes
        for(i = 0; i < npaths; i++)
            glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
    }
    LV_PROFILER_DRAW_END;
}

/* Draw a batch of textured triangles with the IMG shader. */
static void glnvg__triangles(GLNVGcontext * gl, GLNVGcall * call)
{
    LV_PROFILER_DRAW_BEGIN;
    glnvg__setUniforms(gl, call->uniformOffset, call->image, NSVG_SHADER_IMG);
    glnvg__checkError(gl, "triangles fill");

    glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount);
    LV_PROFILER_DRAW_END;
}

/* Discard all queued calls/paths/vertices/uniforms without drawing them. */
static void glnvg__renderCancel(void * uptr)
{
    GLNVGcontext * gl = (GLNVGcontext *)uptr;
    gl->nverts = 0;
    gl->npaths = 0;
    gl->ncalls = 0;
    gl->nuniforms = 0;
}

/* Map an NVG blend factor to the GL enum; GL_INVALID_ENUM when unknown. */
static GLenum glnvg_convertBlendFuncFactor(int factor)
{
    if(factor == NVG_ZERO)
        return GL_ZERO;
    if(factor == NVG_ONE)
        return GL_ONE;
    if(factor == NVG_SRC_COLOR)
        return GL_SRC_COLOR;
    if(factor == NVG_ONE_MINUS_SRC_COLOR)
        return GL_ONE_MINUS_SRC_COLOR;
    if(factor == NVG_DST_COLOR)
        return GL_DST_COLOR;
    if(factor == NVG_ONE_MINUS_DST_COLOR)
        return GL_ONE_MINUS_DST_COLOR;
    if(factor == NVG_SRC_ALPHA)
        return GL_SRC_ALPHA;
    if(factor == NVG_ONE_MINUS_SRC_ALPHA)
        return GL_ONE_MINUS_SRC_ALPHA;
    if(factor == NVG_DST_ALPHA)
        return GL_DST_ALPHA;
    if(factor == NVG_ONE_MINUS_DST_ALPHA)
        return GL_ONE_MINUS_DST_ALPHA;
    if(factor == NVG_SRC_ALPHA_SATURATE)
        return GL_SRC_ALPHA_SATURATE;
    return GL_INVALID_ENUM;
}

/* Convert a composite-operation state to GL blend factors; if any factor
 * is unknown, fall back to ONE / ONE_MINUS_SRC_ALPHA for both channels. */
static GLNVGblend glnvg__blendCompositeOperation(NVGcompositeOperationState op)
{
    GLNVGblend blend;
    blend.srcRGB = glnvg_convertBlendFuncFactor(op.srcRGB);
    blend.dstRGB = glnvg_convertBlendFuncFactor(op.dstRGB);
    blend.srcAlpha = glnvg_convertBlendFuncFactor(op.srcAlpha);
    blend.dstAlpha = glnvg_convertBlendFuncFactor(op.dstAlpha);
    if(blend.srcRGB == GL_INVALID_ENUM || blend.dstRGB == GL_INVALID_ENUM || blend.srcAlpha == GL_INVALID_ENUM ||
       blend.dstAlpha == GL_INVALID_ENUM) {
        blend.srcRGB = GL_ONE;
        blend.dstRGB = GL_ONE_MINUS_SRC_ALPHA;
        blend.srcAlpha = GL_ONE;
        blend.dstAlpha = GL_ONE_MINUS_SRC_ALPHA;
    }
    return blend;
}

/* Flush every queued draw call to the GPU, then reset all queues. */
static void glnvg__renderFlush(void * uptr)
{
    LV_PROFILER_DRAW_BEGIN;
    GLNVGcontext * gl = (GLNVGcontext *)uptr;
    int i;

    if(gl->ncalls > 0) {

        // Set up the required GL state.
+ LV_PROFILER_DRAW_BEGIN_TAG("setup_gl_state"); + glEnable(GL_CULL_FACE); + glCullFace(GL_BACK); + glFrontFace(GL_CCW); + glEnable(GL_BLEND); + glDisable(GL_DEPTH_TEST); + glDisable(GL_SCISSOR_TEST); + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + glStencilMask(0xffffffff); + glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); + glStencilFunc(GL_ALWAYS, 0, 0xffffffff); + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, 0); +#if NANOVG_GL_USE_STATE_FILTER + gl->boundTexture = 0; + gl->boundShader = -1; + gl->stencilMask = 0xffffffff; + gl->stencilFunc = GL_ALWAYS; + gl->stencilFuncRef = 0; + gl->stencilFuncMask = 0xffffffff; + gl->blendFunc.srcRGB = GL_INVALID_ENUM; + gl->blendFunc.srcAlpha = GL_INVALID_ENUM; + gl->blendFunc.dstRGB = GL_INVALID_ENUM; + gl->blendFunc.dstAlpha = GL_INVALID_ENUM; +#endif + LV_PROFILER_DRAW_END_TAG("setup_gl_state"); + +#if NANOVG_GL_USE_UNIFORMBUFFER + // Upload ubo for frag shaders + LV_PROFILER_DRAW_BEGIN_TAG("glBindBuffer"); + glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf); + LV_PROFILER_DRAW_END_TAG("glBindBuffer"); + LV_PROFILER_DRAW_BEGIN_TAG("glBufferData"); + glBufferData(GL_UNIFORM_BUFFER, gl->nuniforms * gl->fragSize, gl->uniforms, GL_STREAM_DRAW); + LV_PROFILER_DRAW_END_TAG("glBufferData"); +#endif + + // Upload vertex data +#if defined NANOVG_GL3 + LV_PROFILER_DRAW_BEGIN_TAG("glBindVertexArray"); + glBindVertexArray(gl->vertArr); + LV_PROFILER_DRAW_END_TAG("glBindVertexArray"); +#endif + gl->vertBufIndex = (gl->vertBufIndex + 1) % 2; + LV_PROFILER_DRAW_BEGIN_TAG("glBindBuffer"); + glBindBuffer(GL_ARRAY_BUFFER, gl->vertBuf[gl->vertBufIndex]); + LV_PROFILER_DRAW_END_TAG("glBindBuffer"); + LV_PROFILER_DRAW_BEGIN_TAG("glBufferData"); + glBufferData(GL_ARRAY_BUFFER, gl->nverts * sizeof(NVGvertex), gl->verts, GL_STREAM_DRAW); + LV_PROFILER_DRAW_END_TAG("glBufferData"); + LV_PROFILER_DRAW_BEGIN_TAG("glEnableVertexAttribArray"); + glEnableVertexAttribArray(0); + glEnableVertexAttribArray(1); + 
        LV_PROFILER_DRAW_END_TAG("glEnableVertexAttribArray");
        LV_PROFILER_DRAW_BEGIN_TAG("glVertexAttribPointer");
        /* attrib 0 = position (x,y), attrib 1 = texcoord (u,v), interleaved in NVGvertex */
        glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid *)(size_t)0);
        glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid *)(0 + 2 * sizeof(float)));
        LV_PROFILER_DRAW_END_TAG("glVertexAttribPointer");

#if NANOVG_GL_USE_UNIFORMBUFFER
        LV_PROFILER_DRAW_BEGIN_TAG("glBindBuffer");
        glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
        LV_PROFILER_DRAW_END_TAG("glBindBuffer");
#endif

        /* Dispatch every queued call, each with its own blend state. */
        for(i = 0; i < gl->ncalls; i++) {
            GLNVGcall * call = &gl->calls[i];
            glnvg__blendFuncSeparate(gl, &call->blendFunc);
            if(call->type == GLNVG_FILL)
                glnvg__fill(gl, call);
            else if(call->type == GLNVG_CONVEXFILL)
                glnvg__convexFill(gl, call);
            else if(call->type == GLNVG_STROKE)
                glnvg__stroke(gl, call);
            else if(call->type == GLNVG_TRIANGLES)
                glnvg__triangles(gl, call);
        }

        glDisableVertexAttribArray(0);
        glDisableVertexAttribArray(1);
#if defined NANOVG_GL3
        glBindVertexArray(0);
#endif
        glDisable(GL_CULL_FACE);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glUseProgram(0);
        glnvg__bindTexture(gl, 0);
    }

    // Reset calls
    gl->nverts = 0;
    gl->npaths = 0;
    gl->ncalls = 0;
    gl->nuniforms = 0;
    LV_PROFILER_DRAW_END;
}

/* Total fill+stroke vertex count over `npaths` paths. */
static int glnvg__maxVertCount(const NVGpath * paths, int npaths)
{
    int i, count = 0;
    for(i = 0; i < npaths; i++) {
        count += paths[i].nfill;
        count += paths[i].nstroke;
    }
    return count;
}

/* Append a zeroed call record, growing the array (1.5x) as needed.
 * Returns NULL when the realloc fails. */
static GLNVGcall * glnvg__allocCall(GLNVGcontext * gl)
{
    GLNVGcall * ret = NULL;
    if(gl->ncalls + 1 > gl->ccalls) {
        GLNVGcall * calls;
        int ccalls = glnvg__maxi(gl->ncalls + 1, 128) + gl->ccalls / 2; // 1.5x Overallocate
        calls = (GLNVGcall *)lv_realloc(gl->calls, sizeof(GLNVGcall) * ccalls);
        if(calls == NULL) return NULL;
        gl->calls = calls;
        gl->ccalls = ccalls;
    }
    ret = &gl->calls[gl->ncalls++];
    lv_memzero(ret, sizeof(GLNVGcall));
    return ret;
}

/* Reserve `n` path records; returns the index of the first one, or -1 on OOM. */
static int
glnvg__allocPaths(GLNVGcontext * gl, int n)
{
    int ret = 0;
    if(gl->npaths + n > gl->cpaths) {
        GLNVGpath * paths;
        int cpaths = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths / 2; // 1.5x Overallocate
        paths = (GLNVGpath *)lv_realloc(gl->paths, sizeof(GLNVGpath) * cpaths);
        if(paths == NULL) return -1;
        gl->paths = paths;
        gl->cpaths = cpaths;
    }
    ret = gl->npaths;
    gl->npaths += n;
    return ret;
}

/* Reserve `n` vertices; returns the index of the first one, or -1 on OOM. */
static int glnvg__allocVerts(GLNVGcontext * gl, int n)
{
    int ret = 0;
    if(gl->nverts + n > gl->cverts) {
        NVGvertex * verts;
        int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts / 2; // 1.5x Overallocate
        verts = (NVGvertex *)lv_realloc(gl->verts, sizeof(NVGvertex) * cverts);
        if(verts == NULL) return -1;
        gl->verts = verts;
        gl->cverts = cverts;
    }
    ret = gl->nverts;
    gl->nverts += n;
    return ret;
}

/* Reserve `n` fragment-uniform records; returns the BYTE offset of the
 * first one (records are gl->fragSize apart), or -1 on OOM. */
static int glnvg__allocFragUniforms(GLNVGcontext * gl, int n)
{
    int ret = 0, structSize = gl->fragSize;
    if(gl->nuniforms + n > gl->cuniforms) {
        unsigned char * uniforms;
        int cuniforms = glnvg__maxi(gl->nuniforms + n, 128) + gl->cuniforms / 2; // 1.5x Overallocate
        uniforms = (unsigned char *)lv_realloc(gl->uniforms, structSize * cuniforms);
        if(uniforms == NULL) return -1;
        gl->uniforms = uniforms;
        gl->cuniforms = cuniforms;
    }
    ret = gl->nuniforms * structSize;
    gl->nuniforms += n;
    return ret;
}

/* Byte offset `i` (from glnvg__allocFragUniforms) -> uniform record pointer. */
static GLNVGfragUniforms * nvg__fragUniformPtr(GLNVGcontext * gl, int i)
{
    return (GLNVGfragUniforms *)&gl->uniforms[i];
}

/* Write one vertex: position (x,y) and texcoord (u,v). */
static void glnvg__vset(NVGvertex * vtx, float x, float y, float u, float v)
{
    vtx->x = x;
    vtx->y = y;
    vtx->u = u;
    vtx->v = v;
}

/* renderFill callback: queue a fill call (stencil-cover, or the convex
 * fast path when there is a single convex path). */
static void glnvg__renderFill(void * uptr, NVGpaint * paint, NVGcompositeOperationState compositeOperation,
                              NVGscissor * scissor, float fringe,
                              const float * bounds, const NVGpath * paths, int npaths)
{
    GLNVGcontext * gl = (GLNVGcontext *)uptr;
    GLNVGcall * call = glnvg__allocCall(gl);
    NVGvertex * quad;
    GLNVGfragUniforms * frag;
    int i,
maxverts, offset; + + if(call == NULL) return; + + call->type = GLNVG_FILL; + call->triangleCount = 4; + call->pathOffset = glnvg__allocPaths(gl, npaths); + if(call->pathOffset == -1) goto error; + call->pathCount = npaths; + call->image = paint->image; + call->blendFunc = glnvg__blendCompositeOperation(compositeOperation); + + if(npaths == 1 && paths[0].convex) { + call->type = GLNVG_CONVEXFILL; + call->triangleCount = 0; // Bounding box fill quad not needed for convex fill + } + + // Allocate vertices for all the paths. + maxverts = glnvg__maxVertCount(paths, npaths) + call->triangleCount; + offset = glnvg__allocVerts(gl, maxverts); + if(offset == -1) goto error; + + for(i = 0; i < npaths; i++) { + GLNVGpath * copy = &gl->paths[call->pathOffset + i]; + const NVGpath * path = &paths[i]; + lv_memzero(copy, sizeof(GLNVGpath)); + if(path->nfill > 0) { + copy->fillOffset = offset; + copy->fillCount = path->nfill; + lv_memcpy(&gl->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill); + offset += path->nfill; + } + if(path->nstroke > 0) { + copy->strokeOffset = offset; + copy->strokeCount = path->nstroke; + lv_memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke); + offset += path->nstroke; + } + } + + // Setup uniforms for draw calls + if(call->type == GLNVG_FILL) { + // Quad + call->triangleOffset = offset; + quad = &gl->verts[call->triangleOffset]; + glnvg__vset(&quad[0], bounds[2], bounds[3], 0.5f, 1.0f); + glnvg__vset(&quad[1], bounds[2], bounds[1], 0.5f, 1.0f); + glnvg__vset(&quad[2], bounds[0], bounds[3], 0.5f, 1.0f); + glnvg__vset(&quad[3], bounds[0], bounds[1], 0.5f, 1.0f); + + call->uniformOffset = glnvg__allocFragUniforms(gl, 2); + if(call->uniformOffset == -1) goto error; + // Simple shader for stencil + frag = nvg__fragUniformPtr(gl, call->uniformOffset); + lv_memzero(frag, sizeof(*frag)); + frag->s.strokeThr = -1.0f; + frag->s.type = NSVG_SHADER_SIMPLE; + // Fill shader + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, 
call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe, + -1.0f); + } + else { + call->uniformOffset = glnvg__allocFragUniforms(gl, 1); + if(call->uniformOffset == -1) goto error; + // Fill shader + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f); + } + + if(paint->image != 0) + call->shaderType = NSVG_SHADER_FILLIMG; + else + call->shaderType = NSVG_SHADER_FILLGRAD; + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. + if(gl->ncalls > 0) gl->ncalls--; +} + +static void glnvg__renderStroke(void * uptr, NVGpaint * paint, NVGcompositeOperationState compositeOperation, + NVGscissor * scissor, float fringe, + float strokeWidth, const NVGpath * paths, int npaths) +{ + GLNVGcontext * gl = (GLNVGcontext *)uptr; + GLNVGcall * call = glnvg__allocCall(gl); + int i, maxverts, offset; + + if(call == NULL) return; + + call->type = GLNVG_STROKE; + call->pathOffset = glnvg__allocPaths(gl, npaths); + if(call->pathOffset == -1) goto error; + call->pathCount = npaths; + call->image = paint->image; + call->blendFunc = glnvg__blendCompositeOperation(compositeOperation); + + // Allocate vertices for all the paths. 
+ maxverts = glnvg__maxVertCount(paths, npaths); + offset = glnvg__allocVerts(gl, maxverts); + if(offset == -1) goto error; + + for(i = 0; i < npaths; i++) { + GLNVGpath * copy = &gl->paths[call->pathOffset + i]; + const NVGpath * path = &paths[i]; + lv_memzero(copy, sizeof(GLNVGpath)); + if(path->nstroke) { + copy->strokeOffset = offset; + copy->strokeCount = path->nstroke; + lv_memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke); + offset += path->nstroke; + } + } + + if(gl->flags & NVG_STENCIL_STROKES) { + // Fill shader + call->uniformOffset = glnvg__allocFragUniforms(gl, 2); + if(call->uniformOffset == -1) goto error; + + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f); + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, + fringe, 1.0f - 0.5f / 255.0f); + + } + else { + // Fill shader + call->uniformOffset = glnvg__allocFragUniforms(gl, 1); + if(call->uniformOffset == -1) goto error; + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f); + } + + if(paint->image != 0) + call->shaderType = NSVG_SHADER_FILLIMG; + else + call->shaderType = NSVG_SHADER_FILLGRAD; + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. + if(gl->ncalls > 0) gl->ncalls--; +} + +static void glnvg__renderTriangles(void * uptr, NVGpaint * paint, NVGcompositeOperationState compositeOperation, + NVGscissor * scissor, + const NVGvertex * verts, int nverts, float fringe) +{ + GLNVGcontext * gl = (GLNVGcontext *)uptr; + GLNVGcall * call = glnvg__allocCall(gl); + GLNVGfragUniforms * frag; + + if(call == NULL) return; + + call->type = GLNVG_TRIANGLES; + call->image = paint->image; + call->blendFunc = glnvg__blendCompositeOperation(compositeOperation); + + // Allocate vertices for all the paths. 
+ call->triangleOffset = glnvg__allocVerts(gl, nverts); + if(call->triangleOffset == -1) goto error; + call->triangleCount = nverts; + + lv_memcpy(&gl->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts); + + // Fill shader + call->uniformOffset = glnvg__allocFragUniforms(gl, 1); + if(call->uniformOffset == -1) goto error; + frag = nvg__fragUniformPtr(gl, call->uniformOffset); + glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, fringe, -1.0f); + frag->s.type = NSVG_SHADER_IMG; + call->shaderType = NSVG_SHADER_IMG; + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. + if(gl->ncalls > 0) gl->ncalls--; +} + +static void glnvg__renderDelete(void * uptr) +{ + GLNVGcontext * gl = (GLNVGcontext *)uptr; + int i; + if(gl == NULL) return; + + for(i = 0; i < GLNVG_SHADER_COUNT; i++) + glnvg__deleteShader(&gl->shaders[i]); + +#if defined NANOVG_GL3 +#if NANOVG_GL_USE_UNIFORMBUFFER + if(gl->fragBuf != 0) + glDeleteBuffers(1, &gl->fragBuf); +#endif + if(gl->vertArr != 0) + glDeleteVertexArrays(1, &gl->vertArr); +#endif + if(gl->vertBuf[0] != 0) + glDeleteBuffers(2, gl->vertBuf); + + for(i = 0; i < gl->ntextures; i++) { + if(gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0) + glDeleteTextures(1, &gl->textures[i].tex); + } + lv_free(gl->textures); + + lv_free(gl->paths); + lv_free(gl->verts); + lv_free(gl->uniforms); + lv_free(gl->calls); + + lv_free(gl); +} + + +#if defined NANOVG_GL2 + NVGcontext * nvgCreateGL2(int flags) +#elif defined NANOVG_GL3 + NVGcontext * nvgCreateGL3(int flags) +#elif defined NANOVG_GLES2 + NVGcontext * nvgCreateGLES2(int flags) +#elif defined NANOVG_GLES3 + NVGcontext * nvgCreateGLES3(int flags) +#endif +{ + NVGparams params; + NVGcontext * ctx = NULL; + GLNVGcontext * gl = (GLNVGcontext *)lv_malloc(sizeof(GLNVGcontext)); + if(gl == NULL) goto error; + lv_memzero(gl, sizeof(GLNVGcontext)); + + lv_memzero(&params, 
sizeof(params)); + params.renderCreate = glnvg__renderCreate; + params.renderCreateTexture = glnvg__renderCreateTexture; + params.renderDeleteTexture = glnvg__renderDeleteTexture; + params.renderUpdateTexture = glnvg__renderUpdateTexture; + params.renderGetTextureSize = glnvg__renderGetTextureSize; + params.renderViewport = glnvg__renderViewport; + params.renderCancel = glnvg__renderCancel; + params.renderFlush = glnvg__renderFlush; + params.renderFill = glnvg__renderFill; + params.renderStroke = glnvg__renderStroke; + params.renderTriangles = glnvg__renderTriangles; + params.renderDelete = glnvg__renderDelete; + params.userPtr = gl; + params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0; + + gl->flags = flags; + + ctx = nvgCreateInternal(&params); + if(ctx == NULL) goto error; + + return ctx; + +error: + // 'gl' is freed by nvgDeleteInternal. + if(ctx != NULL) nvgDeleteInternal(ctx); + return NULL; +} + +#if defined NANOVG_GL2 + void nvgDeleteGL2(NVGcontext * ctx) +#elif defined NANOVG_GL3 + void nvgDeleteGL3(NVGcontext * ctx) +#elif defined NANOVG_GLES2 + void nvgDeleteGLES2(NVGcontext * ctx) +#elif defined NANOVG_GLES3 + void nvgDeleteGLES3(NVGcontext * ctx) +#endif +{ + nvgDeleteInternal(ctx); +} + +#if defined NANOVG_GL2 + int nvglCreateImageFromHandleGL2(NVGcontext * ctx, GLuint textureId, int w, int h, int imageFlags) +#elif defined NANOVG_GL3 + int nvglCreateImageFromHandleGL3(NVGcontext * ctx, GLuint textureId, int w, int h, int imageFlags) +#elif defined NANOVG_GLES2 + int nvglCreateImageFromHandleGLES2(NVGcontext * ctx, GLuint textureId, int w, int h, int imageFlags) +#elif defined NANOVG_GLES3 + int nvglCreateImageFromHandleGLES3(NVGcontext * ctx, GLuint textureId, int w, int h, int imageFlags) +#endif +{ + GLNVGcontext * gl = (GLNVGcontext *)nvgInternalParams(ctx)->userPtr; + GLNVGtexture * tex = glnvg__allocTexture(gl); + + if(tex == NULL) return 0; + + tex->type = NVG_TEXTURE_RGBA; + tex->tex = textureId; + tex->flags = imageFlags; + tex->width = w; + 
tex->height = h; + + return tex->id; +} + +#if defined NANOVG_GL2 + GLuint nvglImageHandleGL2(NVGcontext * ctx, int image) +#elif defined NANOVG_GL3 + GLuint nvglImageHandleGL3(NVGcontext * ctx, int image) +#elif defined NANOVG_GLES2 + GLuint nvglImageHandleGLES2(NVGcontext * ctx, int image) +#elif defined NANOVG_GLES3 + GLuint nvglImageHandleGLES3(NVGcontext * ctx, int image) +#endif +{ + GLNVGcontext * gl = (GLNVGcontext *)nvgInternalParams(ctx)->userPtr; + GLNVGtexture * tex = glnvg__findTexture(gl, image); + return tex->tex; +} + +#endif /* LV_USE_NANOVG */ + +#endif /* NANOVG_GL_IMPLEMENTATION */ diff --git a/src/libs/nanovg/nanovg_gl_utils.h b/src/libs/nanovg/nanovg_gl_utils.h new file mode 100644 index 0000000000..93aea96003 --- /dev/null +++ b/src/libs/nanovg/nanovg_gl_utils.h @@ -0,0 +1,167 @@ +// +// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
+// +#ifndef NANOVG_GL_UTILS_H +#define NANOVG_GL_UTILS_H + +#include "../../lv_conf_internal.h" +#include "../../stdlib/lv_mem.h" +#include "../../stdlib/lv_string.h" + +#if LV_USE_NANOVG + +#ifdef NANOVG_GL_IMPLEMENTATION +struct NVGLUframebuffer { + NVGcontext * ctx; + GLuint fbo; + GLuint rbo; + GLuint texture; + int image; +}; +#endif + +typedef struct NVGLUframebuffer NVGLUframebuffer; + +// Helper function to create GL frame buffer to render to. +// format: see NVGtexture +void nvgluBindFramebuffer(NVGLUframebuffer * fb); +NVGLUframebuffer * nvgluCreateFramebuffer(NVGcontext * ctx, int w, int h, int imageFlags, int format); +void nvgluDeleteFramebuffer(NVGLUframebuffer * fb); + +#endif // NANOVG_GL_UTILS_H + +#ifdef NANOVG_GL_IMPLEMENTATION + +#if defined(NANOVG_GL3) || defined(NANOVG_GLES2) || defined(NANOVG_GLES3) + // FBO is core in OpenGL 3>. + #define NANOVG_FBO_VALID 1 +#elif defined(NANOVG_GL2) + // On OS X including glext defines FBO on GL2 too. + #ifdef __APPLE__ + #include <OpenGL/glext.h> + #define NANOVG_FBO_VALID 1 + #endif +#endif + +static GLint defaultFBO = -1; + +NVGLUframebuffer * nvgluCreateFramebuffer(NVGcontext * ctx, int w, int h, int imageFlags, int format) +{ +#ifdef NANOVG_FBO_VALID + GLint defFBO; + GLint defaultRBO; + NVGLUframebuffer * fb = NULL; + + glGetIntegerv(GL_FRAMEBUFFER_BINDING, &defFBO); + glGetIntegerv(GL_RENDERBUFFER_BINDING, &defaultRBO); + + fb = (NVGLUframebuffer *)lv_malloc(sizeof(NVGLUframebuffer)); + if(fb == NULL) goto error; + lv_memzero(fb, sizeof(NVGLUframebuffer)); + + fb->image = nvgCreateImage(ctx, w, h, imageFlags | NVG_IMAGE_FLIPY | NVG_IMAGE_PREMULTIPLIED, format, NULL); + +#if defined NANOVG_GL2 + fb->texture = nvglImageHandleGL2(ctx, fb->image); +#elif defined NANOVG_GL3 + fb->texture = nvglImageHandleGL3(ctx, fb->image); +#elif defined NANOVG_GLES2 + fb->texture = nvglImageHandleGLES2(ctx, fb->image); +#elif defined NANOVG_GLES3 + fb->texture = nvglImageHandleGLES3(ctx, fb->image); +#endif + + fb->ctx = ctx; + + // 
frame buffer object + glGenFramebuffers(1, &fb->fbo); + glBindFramebuffer(GL_FRAMEBUFFER, fb->fbo); + + // render buffer object + glGenRenderbuffers(1, &fb->rbo); + glBindRenderbuffer(GL_RENDERBUFFER, fb->rbo); + glRenderbufferStorage(GL_RENDERBUFFER, GL_STENCIL_INDEX8, w, h); + + // combine all + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fb->texture, 0); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, fb->rbo); + + if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { +#ifdef GL_DEPTH24_STENCIL8 + // If GL_STENCIL_INDEX8 is not supported, try GL_DEPTH24_STENCIL8 as a fallback. + // Some graphics cards require a depth buffer along with a stencil. + glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, w, h); + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fb->texture, 0); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, fb->rbo); + + if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) +#endif // GL_DEPTH24_STENCIL8 + goto error; + } + + glBindFramebuffer(GL_FRAMEBUFFER, defFBO); + glBindRenderbuffer(GL_RENDERBUFFER, defaultRBO); + return fb; +error: + glBindFramebuffer(GL_FRAMEBUFFER, defFBO); + glBindRenderbuffer(GL_RENDERBUFFER, defaultRBO); + nvgluDeleteFramebuffer(fb); + return NULL; +#else + NVG_NOTUSED(ctx); + NVG_NOTUSED(w); + NVG_NOTUSED(h); + NVG_NOTUSED(imageFlags); + NVG_NOTUSED(format); + return NULL; +#endif +} + +void nvgluBindFramebuffer(NVGLUframebuffer * fb) +{ +#ifdef NANOVG_FBO_VALID + if(defaultFBO == -1) glGetIntegerv(GL_FRAMEBUFFER_BINDING, &defaultFBO); + glBindFramebuffer(GL_FRAMEBUFFER, fb != NULL ? 
fb->fbo : (GLuint)defaultFBO); +#else + NVG_NOTUSED(fb); +#endif +} + +void nvgluDeleteFramebuffer(NVGLUframebuffer * fb) +{ +#ifdef NANOVG_FBO_VALID + if(fb == NULL) return; + if(fb->fbo != 0) + glDeleteFramebuffers(1, &fb->fbo); + if(fb->rbo != 0) + glDeleteRenderbuffers(1, &fb->rbo); + if(fb->image >= 0) + nvgDeleteImage(fb->ctx, fb->image); + fb->ctx = NULL; + fb->fbo = 0; + fb->rbo = 0; + fb->texture = 0; + fb->image = -1; + lv_free(fb); +#else + NVG_NOTUSED(fb); +#endif +} + +#endif // LV_USE_NANOVG + +#endif // NANOVG_GL_IMPLEMENTATION diff --git a/src/lv_conf_internal.h b/src/lv_conf_internal.h index 8c7ea432ff..36aef9d7cd 100644 --- a/src/lv_conf_internal.h +++ b/src/lv_conf_internal.h @@ -33,6 +33,11 @@ #define LV_NEMA_HAL_CUSTOM 0 #define LV_NEMA_HAL_STM32 1 +#define LV_NANOVG_BACKEND_GL2 1 +#define LV_NANOVG_BACKEND_GL3 2 +#define LV_NANOVG_BACKEND_GLES2 3 +#define LV_NANOVG_BACKEND_GLES3 4 + /** Handle special Kconfig options. */ #ifndef LV_KCONFIG_IGNORE #include "lv_conf_kconfig.h" @@ -1161,6 +1166,50 @@ #endif #endif +/** Use NanoVG Renderer + * - Requires LV_USE_NANOVG, LV_USE_MATRIX. + */ +#ifndef LV_USE_DRAW_NANOVG + #ifdef CONFIG_LV_USE_DRAW_NANOVG + #define LV_USE_DRAW_NANOVG CONFIG_LV_USE_DRAW_NANOVG + #else + #define LV_USE_DRAW_NANOVG 0 + #endif +#endif +#if LV_USE_DRAW_NANOVG + /** Select OpenGL backend for NanoVG: + * - LV_NANOVG_BACKEND_GL2: OpenGL 2.0 + * - LV_NANOVG_BACKEND_GL3: OpenGL 3.0+ + * - LV_NANOVG_BACKEND_GLES2: OpenGL ES 2.0 + * - LV_NANOVG_BACKEND_GLES3: OpenGL ES 3.0+ + */ + #ifndef LV_NANOVG_BACKEND + #ifdef CONFIG_LV_NANOVG_BACKEND + #define LV_NANOVG_BACKEND CONFIG_LV_NANOVG_BACKEND + #else + #define LV_NANOVG_BACKEND LV_NANOVG_BACKEND_GLES2 + #endif + #endif + + /** Draw image texture cache count. 
*/ + #ifndef LV_NANOVG_IMAGE_CACHE_CNT + #ifdef CONFIG_LV_NANOVG_IMAGE_CACHE_CNT + #define LV_NANOVG_IMAGE_CACHE_CNT CONFIG_LV_NANOVG_IMAGE_CACHE_CNT + #else + #define LV_NANOVG_IMAGE_CACHE_CNT 128 + #endif + #endif + + /** Draw letter texture cache count. */ + #ifndef LV_NANOVG_LETTER_CACHE_CNT + #ifdef CONFIG_LV_NANOVG_LETTER_CACHE_CNT + #define LV_NANOVG_LETTER_CACHE_CNT CONFIG_LV_NANOVG_LETTER_CACHE_CNT + #else + #define LV_NANOVG_LETTER_CACHE_CNT 512 + #endif + #endif +#endif + /*======================= * FEATURE CONFIGURATION *=======================*/ @@ -3247,6 +3296,15 @@ #endif #endif +/** Enable NanoVG (vector graphics library) */ +#ifndef LV_USE_NANOVG + #ifdef CONFIG_LV_USE_NANOVG + #define LV_USE_NANOVG CONFIG_LV_USE_NANOVG + #else + #define LV_USE_NANOVG 0 + #endif +#endif + /** Use lvgl built-in LZ4 lib */ #ifndef LV_USE_LZ4_INTERNAL #ifdef CONFIG_LV_USE_LZ4_INTERNAL diff --git a/src/lv_conf_kconfig.h b/src/lv_conf_kconfig.h index 3a8d5d10bf..10dee05685 100644 --- a/src/lv_conf_kconfig.h +++ b/src/lv_conf_kconfig.h @@ -107,6 +107,20 @@ extern "C" { # define CONFIG_LV_USE_OS LV_OS_CUSTOM #endif +/******************* + * LV_NANOVG_BACKEND + *******************/ + +#ifdef CONFIG_LV_NANOVG_BACKEND_GL2 +# define CONFIG_LV_NANOVG_BACKEND LV_NANOVG_BACKEND_GL2 +#elif defined(CONFIG_LV_NANOVG_BACKEND_GL3) +# define CONFIG_LV_NANOVG_BACKEND LV_NANOVG_BACKEND_GL3 +#elif defined(CONFIG_LV_NANOVG_BACKEND_GLES2) +# define CONFIG_LV_NANOVG_BACKEND LV_NANOVG_BACKEND_GLES2 +#elif defined(CONFIG_LV_NANOVG_BACKEND_GLES3) +# define CONFIG_LV_NANOVG_BACKEND LV_NANOVG_BACKEND_GLES3 +#endif + /******************* * LV_MEM_SIZE *******************/ diff --git a/src/misc/cache/instance/lv_image_cache.c b/src/misc/cache/instance/lv_image_cache.c index e6e4fa27e4..aaa236294d 100644 --- a/src/misc/cache/instance/lv_image_cache.c +++ b/src/misc/cache/instance/lv_image_cache.c @@ -82,6 +82,9 @@ void lv_image_cache_drop(const void * src) /*If user invalidate image, the header 
cache should be invalidated too.*/ lv_image_header_cache_drop(src); + /*Notify draw units to invalidate any cached resources (e.g., GPU textures) for this image source.*/ + lv_draw_unit_send_event(NULL, LV_EVENT_INVALIDATE_AREA, (void *)src); + if(src == NULL) { lv_cache_drop_all(img_cache_p, NULL); return; diff --git a/src/misc/lv_pending.c b/src/misc/lv_pending.c new file mode 100644 index 0000000000..8feac7d469 --- /dev/null +++ b/src/misc/lv_pending.c @@ -0,0 +1,117 @@ +/** + * @file lv_pending.c + * + */ + +/********************* + * INCLUDES + *********************/ + +#include "lv_pending.h" +#include "lv_array.h" +#include "lv_assert.h" + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +struct _lv_pending_t { + lv_array_t * arr_act; + lv_array_t arr_1; + lv_array_t arr_2; + lv_pending_free_cb_t free_cb; + void * user_data; +}; + +/********************** + * STATIC PROTOTYPES + **********************/ + +static inline void lv_pending_array_clear(lv_pending_t * pending, lv_array_t * arr); + +/********************** + * STATIC VARIABLES + **********************/ + +/********************** + * MACROS + **********************/ + +/********************** + * GLOBAL FUNCTIONS + **********************/ + +lv_pending_t * lv_pending_create(size_t obj_size, uint32_t capacity_default) +{ + lv_pending_t * pending = lv_malloc_zeroed(sizeof(lv_pending_t)); + LV_ASSERT_MALLOC(pending); + lv_array_init(&pending->arr_1, capacity_default, obj_size); + lv_array_init(&pending->arr_2, capacity_default, obj_size); + pending->arr_act = &pending->arr_1; + return pending; +} + +void lv_pending_destroy(lv_pending_t * pending) +{ + LV_ASSERT_NULL(pending); + lv_pending_remove_all(pending); + lv_array_deinit(&pending->arr_1); + lv_array_deinit(&pending->arr_2); + lv_memzero(pending, sizeof(lv_pending_t)); + lv_free(pending); +} + +void lv_pending_set_free_cb(lv_pending_t * pending, lv_pending_free_cb_t 
free_cb, + void * user_data) +{ + LV_ASSERT_NULL(pending); + LV_ASSERT_NULL(free_cb); + pending->free_cb = free_cb; + pending->user_data = user_data; +} + +void lv_pending_add(lv_pending_t * pending, void * obj) +{ + LV_ASSERT_NULL(pending); + LV_ASSERT_NULL(obj); + lv_array_push_back(pending->arr_act, obj); +} + +void lv_pending_remove_all(lv_pending_t * pending) +{ + LV_ASSERT_NULL(pending); + + lv_pending_array_clear(pending, &pending->arr_1); + lv_pending_array_clear(pending, &pending->arr_2); +} + +void lv_pending_swap(lv_pending_t * pending) +{ + pending->arr_act = (pending->arr_act == &pending->arr_1) ? &pending->arr_2 : &pending->arr_1; + lv_pending_array_clear(pending, pending->arr_act); +} + +/********************** + * STATIC FUNCTIONS + **********************/ + +static inline void lv_pending_array_clear(lv_pending_t * pending, lv_array_t * arr) +{ + LV_ASSERT_NULL(pending->free_cb); + + uint32_t size = lv_array_size(arr); + if(size == 0) { + return; + } + + /* remove all the pending objects */ + for(uint32_t i = 0; i < size; i++) { + pending->free_cb(lv_array_at(arr, i), pending->user_data); + } + + lv_array_clear(arr); +} diff --git a/src/misc/lv_pending.h b/src/misc/lv_pending.h new file mode 100644 index 0000000000..9a23d0ea75 --- /dev/null +++ b/src/misc/lv_pending.h @@ -0,0 +1,85 @@ +/** + * @file lv_pending.h + * + */ + +#ifndef LV_PENDING_H +#define LV_PENDING_H + +#ifdef __cplusplus +extern "C" { +#endif + +/********************* + * INCLUDES + *********************/ + +#include "lv_types.h" + +/********************* + * DEFINES + *********************/ + +/********************** + * TYPEDEFS + **********************/ + +typedef struct _lv_pending_t lv_pending_t; + +typedef void (*lv_pending_free_cb_t)(void * obj, void * user_data); + +/********************** + * GLOBAL PROTOTYPES + **********************/ + +/** + * Create a pending list + * @param obj_size the size of the objects in the list + * @param capacity_default the default capacity of 
the list + * @return a pointer to the pending list + */ +lv_pending_t * lv_pending_create(size_t obj_size, uint32_t capacity_default); + +/** + * Destroy a pending list + * @param pending pointer to the pending list + */ +void lv_pending_destroy(lv_pending_t * pending); + +/** + * Set a free callback for the pending list + * @param pending pointer to the pending list + * @param free_cb the free callback + * @param user_data user data to pass to the free callback + */ +void lv_pending_set_free_cb(lv_pending_t * pending, lv_pending_free_cb_t free_cb, + void * user_data); + +/** + * Add an object to the pending list + * @param pending pointer to the pending list + * @param obj pointer to the object to add + */ +void lv_pending_add(lv_pending_t * pending, void * obj); + +/** + * Remove all objects from both pending lists + * @param pending pointer to the pending list + */ +void lv_pending_remove_all(lv_pending_t * pending); + +/** + * Remove all old object references and swap new object references + * @param pending pointer to the pending list + */ +void lv_pending_swap(lv_pending_t * pending); + +/********************** + * MACROS + **********************/ + +#ifdef __cplusplus +} /*extern "C"*/ +#endif + +#endif /*LV_PENDING_H*/ diff --git a/src/widgets/canvas/lv_canvas.c b/src/widgets/canvas/lv_canvas.c index 7194d9bd37..213450ed5c 100644 --- a/src/widgets/canvas/lv_canvas.c +++ b/src/widgets/canvas/lv_canvas.c @@ -395,11 +395,16 @@ void lv_canvas_init_layer(lv_obj_t * obj, lv_layer_t * layer) layer->buf_area = canvas_area; layer->_clip_area = canvas_area; layer->phy_clip_area = canvas_area; + + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_CREATED, layer); } void lv_canvas_finish_layer(lv_obj_t * canvas, lv_layer_t * layer) { - if(layer->draw_task_head == NULL) return; + if(layer->draw_task_head == NULL) { + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_DELETED, layer); + return; + } bool task_dispatched; @@ -412,6 +417,9 @@ void lv_canvas_finish_layer(lv_obj_t * 
canvas, lv_layer_t * layer) lv_draw_dispatch_request(); } } + + lv_draw_unit_send_event(NULL, LV_EVENT_SCREEN_LOAD_START, layer); + lv_draw_unit_send_event(NULL, LV_EVENT_CHILD_DELETED, layer); lv_obj_invalidate(canvas); } diff --git a/tests/src/lv_test_conf_full.h b/tests/src/lv_test_conf_full.h index accf94d7ed..22a0e3f8bc 100644 --- a/tests/src/lv_test_conf_full.h +++ b/tests/src/lv_test_conf_full.h @@ -192,6 +192,9 @@ #ifndef LV_USE_OPENGLES #if !defined(NON_AMD64_BUILD) && !defined(_MSC_VER) && !defined(_WIN32) #define LV_USE_OPENGLES 1 + + #define LV_USE_NANOVG 1 + #define LV_USE_DRAW_NANOVG 1 #endif #endif