Diffstat (limited to 'display')
-rw-r--r-- | display/brush.c   |  391
-rw-r--r-- | display/driver.c  | 1314
-rw-r--r-- | display/driver.rc |   29
-rw-r--r-- | display/lookup3.c |   18
-rw-r--r-- | display/makefile  |    2
-rw-r--r-- | display/mspace.c  | 2435
-rw-r--r-- | display/mspace.h  |  150
-rw-r--r-- | display/pointer.c |  140
-rw-r--r-- | display/quic.c    |   18
-rw-r--r-- | display/qxldd.h   |  301
-rw-r--r-- | display/res.c     | 2433
-rw-r--r-- | display/res.h     |   56
-rw-r--r-- | display/rop.c     | 1526
-rw-r--r-- | display/rop.h     |   35
-rw-r--r-- | display/sources   |   34
-rw-r--r-- | display/text.c    |  116
-rw-r--r-- | display/utils.h   |  113
17 files changed, 9111 insertions, 0 deletions
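A short aside before the per-file hunks: display/brush.c below realizes pattern brushes by converting the source bitmap into the device's native 16bpp or 32bpp format. The _32bppTo16bpp() helper visible in that file shows the 16bpp layout is x1r5g5b5 (5 bits per channel). As a minimal sketch of the opposite direction, here is what a 555-to-8888 expansion looks like; the name example_16bppTo32bpp and the bit-replication step are illustrative assumptions only — the driver's actual _16bppTo32bpp() lives in display/utils.h and is not shown in this hunk.

static UINT32 example_16bppTo32bpp(UINT16 color)
{
    /* unpack x1r5g5b5 */
    UINT32 r = (color >> 10) & 0x1f;
    UINT32 g = (color >> 5) & 0x1f;
    UINT32 b = color & 0x1f;

    /* widen each 5-bit channel to 8 bits; replicating the top bits into the
       low bits is one common choice (a plain << 3 shift also works) */
    r = (r << 3) | (r >> 2);
    g = (g << 3) | (g >> 2);
    b = (b << 3) | (b >> 2);

    return (r << 16) | (g << 8) | b;   /* 0x00RRGGBB */
}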
diff --git a/display/brush.c b/display/brush.c new file mode 100644 index 0000000..6105487 --- /dev/null +++ b/display/brush.c @@ -0,0 +1,391 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. +*/ + +#include "os_dep.h" +#include "qxldd.h" +#include "utils.h" +#include "res.h" + +typedef struct InternalBrush { + ULONG format; + SIZEL size; + UINT32 stride; + UINT32 key; + UINT8 data[0]; +} InternalBrush; + +#define COPY1BPP(bits) \ +static _inline void realizeBrush##bits##Copy1bpp(PDev *pdev, InternalBrush *internal, \ + UINT8 *src, LONG src_stride, \ + XLATEOBJ *color_trans) \ +{ \ + UINT8 *end = src + src_stride * internal->size.cy; \ + UINT8 *dest = internal->data; \ + \ + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); \ + ASSERT(pdev, color_trans && color_trans->flXlate & XO_TABLE); \ + for (; src < end; src += src_stride, dest += internal->stride) { \ + UINT##bits *now = (UINT##bits *)dest; \ + int i; \ + \ + for (i = 0; i < internal->size.cx; i++) { \ + *(now++) = (UINT##bits)color_trans->pulXlate[test_bit_be(src, i)]; \ + } \ + } \ + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); \ +} + +#define COPY4BPP(bits) \ +static _inline void realizeBrush##bits##Copy4bpp(PDev *pdev, InternalBrush *internal, \ + UINT8 *src, LONG src_stride, \ + XLATEOBJ *color_trans) \ +{ \ + UINT8 *dest = internal->data; \ + UINT8 *end = dest + internal->stride * internal->size.cy; \ + \ + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); \ + ASSERT(pdev, color_trans && color_trans->flXlate & XO_TABLE); \ + for (; dest < end; dest += internal->stride, src += src_stride) { \ + UINT8 *src_line = src; \ + UINT8 *src_line_end = src_line + (internal->size.cx >> 1); \ + UINT##bits *dest_line = (UINT##bits *)dest; \ + \ + for (; src_line < src_line_end; src_line++) { \ + ASSERT(pdev, (*src_line & 0x0fU) < color_trans->cEntries); \ + ASSERT(pdev, ((*src_line >> 4) & 0x0fU) < color_trans->cEntries); \ + *(dest_line++) = (UINT##bits)color_trans->pulXlate[(*src_line >> 4) & 0x0f];\ + *(dest_line++) = (UINT##bits)color_trans->pulXlate[*src_line & 0x0f]; \ + } \ + if (internal->size.cx & 1) { \ + *(dest_line) = (UINT##bits)color_trans->pulXlate[(*src_line >> 4) & 0x0f]; \ + } \ + } \ + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); \ +} + +#define COPY8BPP(bits) \ +static _inline void realizeBrush##bits##Copy8bpp(PDev *pdev, InternalBrush *internal, \ + UINT8 *src, LONG src_stride, \ + XLATEOBJ *color_trans) \ +{ \ + UINT8 *dest = internal->data; \ + UINT8 *end = dest + internal->stride * internal->size.cy; \ + \ + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); \ + ASSERT(pdev, color_trans && color_trans->flXlate & XO_TABLE); \ + for (; dest < end; dest += internal->stride, src += src_stride) { \ + UINT8 *src_line = src; \ + UINT8 *src_line_end = src_line + internal->size.cx; \ + UINT##bits *dest_line = (UINT##bits *)dest; \ + \ + for (; src_line < src_line_end; ++dest_line, src_line++) { \ + ASSERT(pdev, 
*src_line < color_trans->cEntries); \ + *dest_line = (UINT##bits)color_trans->pulXlate[*src_line]; \ + } \ + } \ + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); \ +} + +COPY1BPP(16); +COPY1BPP(32); +COPY4BPP(16); +COPY4BPP(32); +COPY8BPP(16); +COPY8BPP(32); + +static _inline void realizeBrush32Copy16bpp(PDev *pdev, InternalBrush *internal, UINT8 *src, + LONG src_stride) +{ + UINT8 *dest = internal->data; + UINT8 *end = dest + internal->stride * internal->size.cy; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + for (; dest < end; dest += internal->stride, src += src_stride) { + UINT32 *dest_line = (UINT32 *)dest; + UINT32 *dest_line_end = dest_line + internal->size.cx; + UINT16 *src_line = (UINT16 *)src; + + for (; dest_line < dest_line_end; ++dest_line, src_line++) { + *dest_line = _16bppTo32bpp(*src_line); + } + } + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); +} + +static _inline void realizeBrush32Copy24bpp(PDev *pdev, InternalBrush *internal, UINT8 *src, + LONG src_stride) +{ + UINT8 *dest = internal->data; + UINT8 *end = dest + internal->stride * internal->size.cy; + int line_size = internal->size.cx << 2; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + for (; dest < end; dest += internal->stride, src += src_stride) { + UINT8* dest_line = dest; + UINT8* dest_line_end = dest_line + line_size; + UINT8* src_line = src; + + while (dest_line < dest_line_end) { + *(dest_line++) = *(src_line++); + *(dest_line++) = *(src_line++); + *(dest_line++) = *(src_line++); + *(dest_line++) = 0; + } + } + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); +} + +static _inline void realizeBrush32Copy32bpp(PDev *pdev, InternalBrush *internal, UINT8 *src, + LONG src_stride) +{ + UINT8 *now = internal->data; + UINT8 *end = now + internal->stride * internal->size.cy; + int line_size = internal->size.cx << 2; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + for (; now < end; now += internal->stride, src += src_stride) { + RtlCopyMemory(now, src, line_size); + } + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); +} + +static _inline void realizeBrush16Copy16bpp(PDev *pdev, InternalBrush *internal, UINT8 *src, + LONG src_stride) +{ + UINT8 *dest = internal->data; + UINT8 *end = dest + internal->stride * internal->size.cy; + int line_size = internal->size.cx << 1; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + for (; dest < end; dest += internal->stride, src += src_stride) { + RtlCopyMemory(dest, src, line_size); + } + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); +} + +static _inline void realizeBrush16Copy24bpp(PDev *pdev, InternalBrush *internal, UINT8 *src, + LONG src_stride) +{ + UINT8 *dest = internal->data; + UINT8 *end = dest + internal->stride * internal->size.cy; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + for (; dest < end; dest += internal->stride, src += src_stride) { + UINT16 *dest_line = (UINT16 *)dest; + UINT16 *dest_line_end = dest_line + internal->size.cx; + UINT8 *src_line = src; + + while (dest_line < dest_line_end) { + *(dest_line++) = ((UINT16)*(src_line++) >> 3) | + (((UINT16)*(src_line++) & 0xf8) << 2) | + (((UINT16)*(src_line++) & 0xf8) << 7); + } + } + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); +} + +UINT16 _32bppTo16bpp(UINT32 color) +{ + return ((color & 0xf8) >> 3) | ((color & 0xf800) >> 6) | ((color & 0xf80000) >> 9); +} + +static _inline void realizeBrush16Copy32bpp(PDev *pdev, InternalBrush *internal, UINT8 *src, + LONG src_stride) +{ + UINT8 *now = internal->data; + UINT8 *end = now + internal->stride * internal->size.cy; + 
+ DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + for (; now < end; now += internal->stride, src += src_stride) { + UINT16 *dest_line = (UINT16 *)now; + UINT16 *dest_line_end = dest_line + internal->size.cx; + UINT32 *src_line = (UINT32 *)src; + + while (dest_line < dest_line_end) { + *(dest_line++) = _32bppTo16bpp(*(src_line++)); + } + } + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); +} + +BOOL APIENTRY DrvRealizeBrush(BRUSHOBJ *brush, SURFOBJ *target, SURFOBJ *pattern, SURFOBJ *mask, + XLATEOBJ *color_trans, ULONG hatch) +{ + PDev *pdev; + InternalBrush *internal; + int stride; + int size; + + if (!(pdev = (PDev *)target->dhpdev)) { + ASSERT(NULL, 0); + return FALSE; + } + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + + if (mask) { + DEBUG_PRINT((pdev, 0, "%s: ignoring mask\n", __FUNCTION__)); + } + + switch (pattern->iBitmapFormat) { + case BMF_1BPP: + case BMF_32BPP: + case BMF_24BPP: + case BMF_16BPP: + case BMF_8BPP: + case BMF_4BPP: + break; + default: + DEBUG_PRINT((pdev, 0, "%s: bad format\n", __FUNCTION__)); + return FALSE; + } + stride = (pdev->bitmap_format == BMF_32BPP) ? pattern->sizlBitmap.cx << 2 : + ALIGN(pattern->sizlBitmap.cx << 1, 4); + size = stride * pattern->sizlBitmap.cy; + size += sizeof(InternalBrush); + + if (!(internal = (InternalBrush *)BRUSHOBJ_pvAllocRbrush(brush, size))) { + DEBUG_PRINT((pdev, 0, "%s: alloc failed\n", __FUNCTION__)); + return FALSE; + } + internal->size = pattern->sizlBitmap; + internal->key = 0; + internal->stride = stride; + if ((internal->format = pdev->bitmap_format) == BMF_32BPP) { + switch (pattern->iBitmapFormat) { + case BMF_1BPP: + realizeBrush32Copy1bpp(pdev, internal, pattern->pvScan0, pattern->lDelta, color_trans); + break; + case BMF_32BPP: + realizeBrush32Copy32bpp(pdev, internal, pattern->pvScan0, pattern->lDelta); + break; + case BMF_24BPP: + realizeBrush32Copy24bpp(pdev, internal, pattern->pvScan0, pattern->lDelta); + break; + case BMF_16BPP: + realizeBrush32Copy16bpp(pdev, internal, pattern->pvScan0, pattern->lDelta); + break; + case BMF_8BPP: + realizeBrush32Copy8bpp(pdev, internal, pattern->pvScan0, pattern->lDelta, color_trans); + break; + case BMF_4BPP: + realizeBrush32Copy4bpp(pdev, internal, pattern->pvScan0, pattern->lDelta, color_trans); + break; + } + } else { + ASSERT(pdev, pdev->bitmap_format == BMF_16BPP); + switch (pattern->iBitmapFormat) { + case BMF_1BPP: + realizeBrush16Copy1bpp(pdev, internal, pattern->pvScan0, pattern->lDelta, color_trans); + break; + case BMF_32BPP: + realizeBrush16Copy32bpp(pdev, internal, pattern->pvScan0, pattern->lDelta); + break; + case BMF_24BPP: + realizeBrush16Copy24bpp(pdev, internal, pattern->pvScan0, pattern->lDelta); + break; + case BMF_16BPP: + realizeBrush16Copy16bpp(pdev, internal, pattern->pvScan0, pattern->lDelta); + break; + case BMF_8BPP: + realizeBrush16Copy8bpp(pdev, internal, pattern->pvScan0, pattern->lDelta, color_trans); + break; + case BMF_4BPP: + realizeBrush16Copy4bpp(pdev, internal, pattern->pvScan0, pattern->lDelta, color_trans); + break; + } + } + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); + return TRUE; +} + + +static _inline BOOL GetPattern(PDev *pdev, QXLDrawable *drawable, PHYSICAL *pattern, + InternalBrush *brush) +{ + HSURF hsurf; + SURFOBJ *surf_obj; + Rect area; + UINT32 key; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + if (brush->key && QXLGetBitsFromCache(pdev, drawable, brush->key, pattern)) { + DEBUG_PRINT((pdev, 13, "%s: from cache\n", __FUNCTION__)); + return TRUE; + } + + if (!(hsurf = 
(HSURF)EngCreateBitmap(brush->size, brush->stride, brush->format, BMF_TOPDOWN, + brush->data))) { + DEBUG_PRINT((pdev, 0, "%s: create bitmap failed\n", __FUNCTION__)); + return FALSE; + } + + if (!(surf_obj = EngLockSurface(hsurf))) { + DEBUG_PRINT((pdev, 0, "%s: lock surf failed\n", __FUNCTION__)); + goto error_1; + } + area.left = area.top = 0; + area.right = brush->size.cx; + area.bottom = brush->size.cy; + + if (!QXLGetBitmap(pdev, drawable, pattern, surf_obj, &area, NULL, &key, TRUE)) { + goto error_2; + } + + brush->key = key; + EngUnlockSurface(surf_obj); + EngDeleteSurface(hsurf); + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); + return TRUE; + +error_2: + EngUnlockSurface(surf_obj); +error_1: + EngDeleteSurface(hsurf); + return FALSE; +} + + +BOOL QXLGetBrush(PDev *pdev, QXLDrawable *drawable, Brush *qxl_brush, + BRUSHOBJ *brush, POINTL *brush_pos) +{ + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + ASSERT(pdev, brush); + + if (brush->iSolidColor == ~0) { + ASSERT(pdev, brush_pos); + + DEBUG_PRINT((pdev, 11, "%s: pattern\n", __FUNCTION__)); + if (!brush->pvRbrush && !BRUSHOBJ_pvGetRbrush(brush)) { + DEBUG_PRINT((pdev, 0, "%s: brush realize failed\n", __FUNCTION__)); + return FALSE; + } + qxl_brush->type = BRUSH_TYPE_PATTERN; + qxl_brush->u.pattern.pos.x = brush_pos->x; + qxl_brush->u.pattern.pos.y = brush_pos->y; + if (!GetPattern(pdev, drawable, &qxl_brush->u.pattern.pat, brush->pvRbrush)) { + return FALSE; + } + } else { + qxl_brush->type = BRUSH_TYPE_SOLID; + qxl_brush->u.color = brush->iSolidColor; + DEBUG_PRINT((pdev, 11, "%s: color 0x%x\n", __FUNCTION__, qxl_brush->u.color)); + } + DEBUG_PRINT((pdev, 10, "%s: done\n", __FUNCTION__)); + return TRUE; +} + diff --git a/display/driver.c b/display/driver.c new file mode 100644 index 0000000..c9140af --- /dev/null +++ b/display/driver.c @@ -0,0 +1,1314 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include "stddef.h" + +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include "os_dep.h" + +#include "winerror.h" +#include "windef.h" +#include "wingdi.h" +#include "winddi.h" +#include "devioctl.h" +#include "ntddvdeo.h" +#include "ioaccess.h" + +#include "qxldd.h" +#include "utils.h" +#include "mspace.h" +#include "res.h" + +#define DEVICE_NAME L"qxldd" + +#define QXLDD_DEBUG_PREFIX "qxldd: " + +static DRVFN drv_calls[] = { + {INDEX_DrvDisableDriver, (PFN)DrvDisableDriver}, + {INDEX_DrvEnablePDEV, (PFN)DrvEnablePDEV}, + {INDEX_DrvDisablePDEV, (PFN)DrvDisablePDEV}, + {INDEX_DrvCompletePDEV, (PFN)DrvCompletePDEV}, + {INDEX_DrvEnableSurface, (PFN)DrvEnableSurface}, + {INDEX_DrvDisableSurface, (PFN) DrvDisableSurface}, + {INDEX_DrvAssertMode, (PFN)DrvAssertMode}, + {INDEX_DrvGetModes, (PFN)DrvGetModes}, + {INDEX_DrvSynchronize, (PFN)DrvSynchronize}, + {INDEX_DrvCopyBits, (PFN)DrvCopyBits}, + {INDEX_DrvBitBlt, (PFN)DrvBitBlt}, + {INDEX_DrvTextOut, (PFN)DrvTextOut}, + {INDEX_DrvStrokePath, (PFN)DrvStrokePath}, + {INDEX_DrvRealizeBrush, (PFN)DrvRealizeBrush}, + {INDEX_DrvSetPointerShape, (PFN)DrvSetPointerShape}, + {INDEX_DrvMovePointer, (PFN)DrvMovePointer}, + {INDEX_DrvStretchBlt, (PFN)DrvStretchBlt}, + {INDEX_DrvStretchBltROP, (PFN)DrvStretchBltROP}, + {INDEX_DrvTransparentBlt, (PFN)DrvTransparentBlt}, + {INDEX_DrvAlphaBlend, (PFN)DrvAlphaBlend}, + +#ifdef CALL_TEST + {INDEX_DrvFillPath, (PFN)DrvFillPath}, + {INDEX_DrvGradientFill, (PFN)DrvGradientFill}, + {INDEX_DrvLineTo, (PFN)DrvLineTo}, + {INDEX_DrvPlgBlt, (PFN)DrvPlgBlt}, + {INDEX_DrvStrokeAndFillPath, (PFN)DrvStrokeAndFillPath}, +#endif +}; + +#ifdef CALL_TEST + +typedef struct CallCounter { + const char *name; + BOOL effective; +} CallCounterInfo; + +static CallCounterInfo counters_info[NUM_CALL_COUNTERS] = { + { "DrvCopyBits", FALSE}, + { "DrvBitBlt", TRUE}, + { "DrvTextOut", TRUE}, + { "DrvStrokePath", TRUE}, + { "DrvStretchBlt", FALSE}, + { "DrvStretchBltROP", TRUE}, + { "TransparentBlt", FALSE}, + { "DrvAlphaBlend", FALSE}, + + { "DrvFillPath", FALSE}, + { "DrvGradientFill", FALSE}, + { "DrvLineTo", FALSE}, + { "DrvPlgBlt", FALSE}, + { "DrvStrokeAndFillPath", FALSE}, +}; + +#endif + +#define DBG_LEVEL 0 + +void DebugPrintV(PDev *pdev, const char *message, va_list ap) +{ + if (pdev && pdev->log_buf) { + EngAcquireSemaphore(pdev->print_sem); + _snprintf(pdev->log_buf, QXL_LOG_BUF_SIZE, QXLDD_DEBUG_PREFIX); + _vsnprintf(pdev->log_buf + strlen(QXLDD_DEBUG_PREFIX), + QXL_LOG_BUF_SIZE - strlen(QXLDD_DEBUG_PREFIX), message, ap); + WRITE_PORT_UCHAR(pdev->log_port, 0); + EngReleaseSemaphore(pdev->print_sem); + } else { + EngDebugPrint(QXLDD_DEBUG_PREFIX, (PCHAR)message, ap); + } +} + +void DebugPrint(PDev *pdev, int level, const char *message, ...) +{ + va_list ap; + + if (level >= (pdev ? 
(int)*pdev->log_level : DBG_LEVEL)) { + return; + } + va_start(ap, message); + DebugPrintV(pdev, message, ap); + va_end(ap); +} + +#define DRIVER_VERSION 1 +#define OS_VERSION_MAJOR 5 +#define OS_VERSION_MINOR 0 +#define MK_GDIINFO_VERSION(os_major, os_minor, drv_vers) \ + ((drv_vers) | ((os_minor) << 8) | ((os_major) << 12)) + + +GDIINFO gdi_default = { + MK_GDIINFO_VERSION(OS_VERSION_MAJOR, OS_VERSION_MINOR, DRIVER_VERSION), + DT_RASDISPLAY, + 0, //ulHorzSize + 0, //ulVertSize + 0, //ulHorzRes + 0, //ulVertRes + 0, //cBitsPixel + 0, //cPlanes + 0, //ulNumColors + 0, //flRaster + 0, //ulLogPixelsX + 0, //ulLogPixelsY + TC_RA_ABLE, //flTextCaps + 0, //ulDACRed + 0, //ulDACGreen + 0, //ulDACBlue + 0x0024, //ulAspectX + 0x0024, //ulAspectY + 0x0033, //ulAspectXY + 1, //xStyleStep + 1, //yStyleSte; + 3, //denStyleStep + { 0, 0}, //ptlPhysOffset + { 0, 0}, //szlPhysSize + 0, //ulNumPalReg + + { //ciDevice + { 6700, 3300, 0}, //Red + { 2100, 7100, 0}, //Green + { 1400, 800, 0}, //Blue + { 1750, 3950, 0}, //Cyan + { 4050, 2050, 0}, //Magenta + { 4400, 5200, 0}, //Yellow + { 3127, 3290, 0}, //AlignmentWhite + 20000, //RedGamma + 20000, //GreenGamma + 20000, //BlueGamma + 0, 0, 0, 0, 0, 0 //No dye correction for raster displays + }, + + 0, //ulDevicePelsDPI + PRIMARY_ORDER_CBA, //ulPrimaryOrder + HT_PATSIZE_4x4_M, //ulHTPatternSize + HT_FORMAT_8BPP, //ulHTOutputFormat + HT_FLAG_ADDITIVE_PRIMS, //flHTFlags + 0, //ulVRefresh + 0, //ulPanningHorzRes + 0, //ulPanningVertRes + 0, //ulBltAlignment + //more +}; + +#define SYSTM_LOGFONT {16, 7, 0, 0, 700, 0, 0, 0,ANSI_CHARSET, OUT_DEFAULT_PRECIS,\ + CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY,\ + VARIABLE_PITCH | FF_DONTCARE, L"System"} +#define HELVE_LOGFONT {12, 9, 0, 0, 400, 0, 0, 0, ANSI_CHARSET, OUT_DEFAULT_PRECIS,\ + CLIP_STROKE_PRECIS, PROOF_QUALITY,\ + VARIABLE_PITCH | FF_DONTCARE, L"MS Sans Serif"} +#define COURI_LOGFONT {12, 9, 0, 0, 400, 0, 0, 0, ANSI_CHARSET,OUT_DEFAULT_PRECIS,\ + CLIP_STROKE_PRECIS,PROOF_QUALITY,\ + FIXED_PITCH | FF_DONTCARE, L"Courier"} + +DEVINFO dev_default = { + GCAPS_ARBRUSHOPAQUE | GCAPS_ARBRUSHTEXT | GCAPS_ASYNCMOVE | GCAPS_BEZIERS | + /*GCAPS_FONT_RASTERIZER |*/ /*for now GCAPS_GEOMETRICWIDE |*/ GCAPS_GRAY16 | GCAPS_OPAQUERECT | + GCAPS_VECTORFONT | GCAPS_WINDINGFILL /*| GCAPS_LAYERED*/, + SYSTM_LOGFONT, //lfDefaultFont + HELVE_LOGFONT, //lfAnsiVarFont + COURI_LOGFONT, //lfAnsiFixFont + 0, //cFonts + 0, //iDitherFormat + 0, //cxDither + 0, //cyDither + 0, //hpalDefault +#if (WINVER >= 0x0501) + GCAPS2_MOUSETRAILS | +#endif + GCAPS2_ALPHACURSOR, +}; + +static void mspace_print(void *user_data, char *format, ...) 
+{ + PDev *pdev = (PDev *)user_data; + va_list ap; + + va_start(ap, format); + DebugPrintV(pdev, format, ap); + va_end(ap); +} + +static void mspace_abort(void *user_data) +{ + mspace_print(user_data, "mspace abort"); + EngDebugBreak(); +} + +BOOL DrvEnableDriver(ULONG engine_version, ULONG enable_data_size, PDRVENABLEDATA enable_data) +{ + DEBUG_PRINT((NULL, 1, "%s\n", __FUNCTION__)); + enable_data->iDriverVersion = DDI_DRIVER_VERSION_NT5; + enable_data->c = sizeof(drv_calls) / sizeof(DRVFN); + enable_data->pdrvfn = drv_calls; + mspace_set_abort_func(mspace_abort); + mspace_set_print_func(mspace_print); + ResInitGlobals(); + DEBUG_PRINT((NULL, 1, "%s: end\n", __FUNCTION__)); + return TRUE; +} + +VOID DrvDisableDriver(VOID) +{ + DEBUG_PRINT((NULL, 1, "%s\n", __FUNCTION__)); + ResDestroyGlobals(); +} + +DWORD GetAvailableModes(HANDLE driver, PVIDEO_MODE_INFORMATION *mode_info, + DWORD *mode_info_size) +{ + ULONG n; + VIDEO_NUM_MODES modes; + PVIDEO_MODE_INFORMATION info; + + DEBUG_PRINT((NULL, 1, "%s\n", __FUNCTION__)); + + if (EngDeviceIoControl(driver, IOCTL_VIDEO_QUERY_NUM_AVAIL_MODES, NULL, 0, + &modes, sizeof(VIDEO_NUM_MODES), &n)) { + DEBUG_PRINT((NULL, 0, "%s: query num modes failed\n", __FUNCTION__)); + return 0; + } + + info = (PVIDEO_MODE_INFORMATION)EngAllocMem(FL_ZERO_MEMORY, + modes.NumModes * modes.ModeInformationLength, + ALLOC_TAG); + if (!info) { + DEBUG_PRINT((NULL, 0, "%s: memory allocation failed\n", __FUNCTION__)); + return 0; + } + + if (EngDeviceIoControl(driver, IOCTL_VIDEO_QUERY_AVAIL_MODES, NULL, 0, info, + modes.NumModes * modes.ModeInformationLength, &n)) { + DEBUG_PRINT((NULL, 0, "%s: query modes failed\n", __FUNCTION__)); + EngFreeMem(info); + return 0; + } + + *mode_info = info; + *mode_info_size = modes.ModeInformationLength; + + n = modes.NumModes; + while ( n-- ) { + if ( (info->NumberOfPlanes != 1 ) ||!(info->AttributeFlags & VIDEO_MODE_GRAPHICS) + ||((info->BitsPerPlane != 16) && (info->BitsPerPlane != 32)) + || (info->VisScreenWidth > 2000) || (info->VisScreenWidth < 480) + || (info->VisScreenHeight > 2000) || (info->VisScreenHeight < 480) ) { + + DEBUG_PRINT((NULL, 1, "%s: unsuported mode rejecting miniport mode\n", __FUNCTION__)); + DEBUG_PRINT((NULL, 1, " width = %li height = %li\n", + info->VisScreenWidth, info->VisScreenHeight)); + DEBUG_PRINT((NULL, 1, " bpp = %li freq = %li\n", + info->BitsPerPlane * info->NumberOfPlanes, info->Frequency)); + info->Length = 0; + } + + info = (PVIDEO_MODE_INFORMATION) (((PUCHAR)info) + modes.ModeInformationLength); + } + DEBUG_PRINT((NULL, 1, "%s: OK num modes %lu\n", __FUNCTION__, modes.NumModes)); + return modes.NumModes; +} + +BOOL InitializeModeFields(PDev *pdev, GDIINFO *gdi_info, DEVINFO *dev_info, + DEVMODEW *dev_mode) +{ + ULONG n_modes; + PVIDEO_MODE_INFORMATION video_buff; + PVIDEO_MODE_INFORMATION selected_mode; + PVIDEO_MODE_INFORMATION video_mode; + VIDEO_MODE_INFORMATION vmi; + ULONG video_mode_size; + + DEBUG_PRINT((NULL, 1, "%s\n", __FUNCTION__)); + + n_modes = GetAvailableModes(pdev->driver, &video_buff, &video_mode_size); + if ( n_modes == 0 ) { + DEBUG_PRINT((NULL, 0, "%s: get available modes failed\n", __FUNCTION__)); + return FALSE; + } + +#if (WINVER < 0x0501) + DEBUG_PRINT((NULL, 1, "%s: req mode: fields %u bits %u w %u h %u frequency %u\n", + __FUNCTION__, + dev_mode->dmFields, + dev_mode->dmBitsPerPel, + dev_mode->dmPelsWidth, + dev_mode->dmPelsHeight, + dev_mode->dmDisplayFrequency)); +#else + DEBUG_PRINT((NULL, 1, "%s: req mode: fields %u bits %u w %u h %u frequency %u orientation %u\n", 
+ __FUNCTION__, + dev_mode->dmFields, + dev_mode->dmBitsPerPel, + dev_mode->dmPelsWidth, + dev_mode->dmPelsHeight, + dev_mode->dmDisplayFrequency, + dev_mode->dmDisplayOrientation)); +#endif + + + selected_mode = NULL; + video_mode = video_buff; + + while (n_modes--) { + if ( video_mode->Length != 0 ) { + DEBUG_PRINT((NULL, 1, "%s: check width = %li height = %li\n", + __FUNCTION__, + video_mode->VisScreenWidth, + video_mode->VisScreenHeight)); + DEBUG_PRINT((NULL, 1, " bpp = %li freq = %li\n", + video_mode->BitsPerPlane * video_mode->NumberOfPlanes, + video_mode->Frequency)); + + if ( (video_mode->VisScreenWidth == dev_mode->dmPelsWidth) + && (video_mode->VisScreenHeight == dev_mode->dmPelsHeight) + && (video_mode->BitsPerPlane * video_mode->NumberOfPlanes + == dev_mode->dmBitsPerPel) + && (video_mode->Frequency == dev_mode->dmDisplayFrequency) +#if (WINVER >= 0x0501) + && (video_mode->DriverSpecificAttributeFlags + == dev_mode->dmDisplayOrientation) +#endif + ) { + selected_mode = video_mode; + DEBUG_PRINT((NULL, 1, "%s: found\n", __FUNCTION__)); + break; + } + } + video_mode = (PVIDEO_MODE_INFORMATION)(((PUCHAR)video_mode) + video_mode_size); + } + + if (!selected_mode) { + DEBUG_PRINT((NULL, 0, "%s: not found\n")); + EngFreeMem(video_buff); + return FALSE; + } + + vmi = *selected_mode; + EngFreeMem(video_buff); + + pdev->video_mode_index = vmi.ModeIndex; + pdev->resolution.cx = vmi.VisScreenWidth; + pdev->resolution.cy = vmi.VisScreenHeight; + pdev->max_bitmap_size = pdev->resolution.cx * pdev->resolution.cy; + pdev->max_bitmap_size += pdev->max_bitmap_size / 2; + pdev->stride = vmi.ScreenStride; + + *gdi_info = gdi_default; + + gdi_info->ulHorzSize = vmi.XMillimeter; + gdi_info->ulVertSize = vmi.YMillimeter; + gdi_info->ulHorzRes = vmi.VisScreenWidth; + gdi_info->ulVertRes = vmi.VisScreenHeight; + gdi_info->cBitsPixel = vmi.BitsPerPlane; + gdi_info->cPlanes = vmi.NumberOfPlanes; + gdi_info->ulVRefresh = vmi.Frequency; + gdi_info->ulDACRed = vmi.NumberRedBits; + gdi_info->ulDACGreen = vmi.NumberGreenBits; + gdi_info->ulDACBlue = vmi.NumberBlueBits; + gdi_info->ulLogPixelsX = dev_mode->dmLogPixels; + gdi_info->ulLogPixelsY = dev_mode->dmLogPixels; + + *dev_info = dev_default; + + switch ( vmi.BitsPerPlane ) { + case 16: + pdev->bitmap_format = BMF_16BPP; + pdev->red_mask = vmi.RedMask; + pdev->green_mask = vmi.GreenMask; + pdev->blue_mask = vmi.BlueMask; + + gdi_info->ulNumColors = (ULONG)-1; + gdi_info->ulNumPalReg = 0; + gdi_info->ulHTOutputFormat = HT_FORMAT_16BPP; + + dev_info->iDitherFormat = BMF_16BPP; + break; + case 32: + pdev->bitmap_format = BMF_32BPP; + pdev->red_mask = vmi.RedMask; + pdev->green_mask = vmi.GreenMask; + pdev->blue_mask = vmi.BlueMask; + + gdi_info->ulNumColors = (ULONG)-1; + gdi_info->ulNumPalReg = 0; + gdi_info->ulHTOutputFormat = HT_FORMAT_32BPP; + + dev_info->iDitherFormat = BMF_32BPP; + break; + default: + DEBUG_PRINT((NULL, 0, "%s: bit depth not supported\n", __FUNCTION__)); + return FALSE; + } + DEBUG_PRINT((NULL, 1, "%s: exit\n", __FUNCTION__)); + return TRUE; +} + +void DestroyPalette(PDev *pdev) +{ + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + if (pdev->palette) { + EngDeletePalette(pdev->palette); + pdev->palette = NULL; + } + DEBUG_PRINT((NULL, 1, "%s: 0x%lx exit\n", __FUNCTION__, pdev)); +} + +BOOL InitPalette(PDev *pdev, DEVINFO *dev_info) +{ + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + + if ((pdev->palette = EngCreatePalette(PAL_BITFIELDS, 0, NULL, + pdev->red_mask, + pdev->green_mask, + pdev->blue_mask)) == 
NULL) { + DEBUG_PRINT((NULL, 0, "%s: create palette failed\n", __FUNCTION__)); + return FALSE; + } + dev_info->hpalDefault = pdev->palette; + + DEBUG_PRINT((NULL, 1, "%s: OK\n", __FUNCTION__)); + return TRUE; +} + +DHPDEV DrvEnablePDEV(DEVMODEW *dev_mode, PWSTR ignore1, ULONG ignore2, HSURF *ignore3, + ULONG dev_caps_size, ULONG *dev_caps, ULONG dev_inf_size, + DEVINFO *in_dev_info, HDEV gdi_dev, PWSTR device_name, HANDLE driver) +{ + PDev *pdev; + GDIINFO gdi_info; + DEVINFO dev_info; + + DEBUG_PRINT((NULL, 1, "%s\n", __FUNCTION__)); + + if (!(pdev = (PDev*)EngAllocMem(FL_ZERO_MEMORY, sizeof(PDev), ALLOC_TAG))) { + DEBUG_PRINT((NULL, 0, "%s: pdev alloc failed\n", __FUNCTION__)); + return NULL; + } + + RtlZeroMemory(&gdi_info, sizeof(GDIINFO)); + RtlCopyMemory(&gdi_info, dev_caps, MIN(dev_caps_size, sizeof(GDIINFO))); + + RtlZeroMemory(&dev_info, sizeof(DEVINFO)); + RtlCopyMemory(&dev_info, in_dev_info, MIN(dev_inf_size, sizeof(DEVINFO))); + + pdev->driver = driver; + + if (!InitializeModeFields(pdev, &gdi_info, &dev_info, dev_mode)) { + DEBUG_PRINT((NULL, 0, "%s: init mode failed\n", __FUNCTION__)); + goto err1; + } + + if (!InitPalette(pdev, &dev_info)) { + DEBUG_PRINT((NULL, 0, "%s: init palet failed\n", __FUNCTION__)); + goto err1; + } + + if (!(pdev->malloc_sem = EngCreateSemaphore())) { + DEBUG_PRINT((NULL, 0, "%s: create malloc sem failed\n", __FUNCTION__)); + goto err2; + } + + if (!(pdev->print_sem = EngCreateSemaphore())) { + DEBUG_PRINT((NULL, 0, "%s: create malloc sem failed\n", __FUNCTION__)); + goto err3; + } + + if (!ResInit(pdev)) { + DEBUG_PRINT((NULL, 0, "%s: init res failed\n", __FUNCTION__)); + goto err4; + } + + RtlCopyMemory(dev_caps, &gdi_info, dev_caps_size); + RtlCopyMemory(in_dev_info, &dev_info, dev_inf_size); + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + return(DHPDEV)pdev; + +err4: + EngDeleteSemaphore(pdev->print_sem); + +err3: + EngDeleteSemaphore(pdev->malloc_sem); + +err2: + DestroyPalette(pdev); + +err1: + EngFreeMem(pdev); + + return NULL; +} + +VOID DrvDisablePDEV(DHPDEV in_pdev) +{ + PDev* pdev = (PDev*)in_pdev; + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + ResDestroy(pdev); + DestroyPalette(pdev); + EngDeleteSemaphore(pdev->malloc_sem); + EngDeleteSemaphore(pdev->print_sem); + EngFreeMem(pdev); + DEBUG_PRINT((NULL, 1, "%s: 0x%lx exit\n", __FUNCTION__, pdev)); +} + +VOID DrvCompletePDEV(DHPDEV in_pdev, HDEV gdi_dev) +{ + PDev* pdev = (PDev*)in_pdev; + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + pdev->eng = gdi_dev; + DEBUG_PRINT((NULL, 1, "%s: 0x%lx exit\n", __FUNCTION__, pdev)); +} + +BOOL SetHardwareMode(PDev *pdev) +{ + VIDEO_MODE_INFORMATION video_info; + DWORD length; + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx mode %lu\n", __FUNCTION__, pdev, pdev->video_mode_index)); + + if (EngDeviceIoControl(pdev->driver, IOCTL_VIDEO_SET_CURRENT_MODE, + &pdev->video_mode_index, sizeof(DWORD), + NULL, 0, &length)) { + DEBUG_PRINT((NULL, 0, "%s: set mode failed, 0x%lx\n", __FUNCTION__, pdev)); + return FALSE; + } + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx OK\n", __FUNCTION__, pdev)); + return TRUE; +} + +static BOOL InitDrawArea(PDev *pdev, UINT8 *base) +{ + HSURF bitmap; + SIZEL size; + SURFOBJ* surf_obj; + + size.cx = pdev->resolution.cx; + size.cy = pdev->resolution.cy; + + if (!(bitmap = (HSURF)EngCreateBitmap(size, size.cx << 2, BMF_32BPP, 0, base))) { + DEBUG_PRINT((pdev, 0, "%s: EngCreateBitmap failed\n", __FUNCTION__)); + return FALSE; + } + + if (!EngAssociateSurface(bitmap, pdev->eng, 0)) { + 
DEBUG_PRINT((pdev, 0, "%s: EngAssociateSurface failed\n", __FUNCTION__)); + goto error; + + } + + if (!(surf_obj = EngLockSurface(bitmap))) { + DEBUG_PRINT((pdev, 0, "%s: EngLockSurface failed\n", __FUNCTION__)); + goto error; + } + + pdev->draw_bitmap = bitmap; + pdev->draw_surf = surf_obj; + return TRUE; + +error: + EngDeleteSurface(bitmap); + return FALSE; +} + +BOOL PrepareHardware(PDev *pdev) +{ + VIDEO_MEMORY video_mem; + VIDEO_MEMORY_INFORMATION video_mem_Info; + DWORD length; + QXLDriverInfo dev_info; + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + if (!SetHardwareMode(pdev)) { + DEBUG_PRINT((NULL, 0, "%s: set mode failed, 0x%lx\n", __FUNCTION__, pdev)); + return FALSE; + } + + if (EngDeviceIoControl( pdev->driver, IOCTL_QXL_GET_INFO, NULL, + 0, &dev_info, sizeof(QXLDriverInfo), &length) ) { + DEBUG_PRINT((NULL, 0, "%s: get qxl info failed, 0x%lx\n", __FUNCTION__, pdev)); + return FALSE; + } + + if (dev_info.version != QXL_DRIVER_INFO_VERSION) { + DEBUG_PRINT((NULL, 0, "%s: get qxl info mismatch, 0x%lx\n", __FUNCTION__, pdev)); + return FALSE; + } + + pdev->cmd_ring = dev_info.cmd_ring; + pdev->cursor_ring = dev_info.cursor_ring; + pdev->release_ring = dev_info.release_ring; + pdev->notify_cmd_port = dev_info.notify_cmd_port; + pdev->notify_cursor_port = dev_info.notify_cursor_port; + pdev->notify_oom_port = dev_info.notify_oom_port; + pdev->display_event = dev_info.display_event; + pdev->cursor_event = dev_info.cursor_event; + pdev->sleep_event = dev_info.sleep_event; +#if (WINVER < 0x0501) + pdev->WaitForEvent = dev_info.WaitForEvent; +#endif + + pdev->num_io_pages = dev_info.num_io_pages; + pdev->io_pages_virt = dev_info.io_pages_virt; + pdev->io_pages_phys = dev_info.io_pages_phys; + + pdev->dev_update_id = dev_info.update_id; + pdev->update_id = *pdev->dev_update_id; + + pdev->update_area_port = dev_info.update_area_port; + pdev->update_area = dev_info.update_area; + + pdev->mm_clock = dev_info.mm_clock; + + pdev->compression_level = dev_info.compression_level; + + pdev->log_port = dev_info.log_port; + pdev->log_buf = dev_info.log_buf; + pdev->log_level = dev_info.log_level; + + video_mem.RequestedVirtualAddress = NULL; + + if (EngDeviceIoControl( pdev->driver, IOCTL_VIDEO_MAP_VIDEO_MEMORY, &video_mem, + sizeof(VIDEO_MEMORY), &video_mem_Info, + sizeof(video_mem_Info), &length) ) { + DEBUG_PRINT((NULL, 0, "%s: mapping failed, 0x%lx\n", __FUNCTION__, pdev)); + return FALSE; + } + DEBUG_PRINT((NULL, 1, "%s: 0x%lx vals 0x%lx %ul\n", __FUNCTION__, pdev, + video_mem_Info.FrameBufferBase, video_mem_Info.FrameBufferLength)); + pdev->fb = (BYTE*)video_mem_Info.FrameBufferBase; + pdev->fb_size = video_mem_Info.FrameBufferLength; + + if (!InitDrawArea(pdev, dev_info.draw_area)) { + DEBUG_PRINT((NULL, 0, "%s: InitDrawArea failed, 0x%lx\n", __FUNCTION__, pdev)); + return FALSE; + } + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx exit: 0x%lx %ul\n", __FUNCTION__, pdev, + pdev->fb, pdev->fb_size)); + return TRUE; +} + +HSURF DrvEnableSurface(DHPDEV in_pdev) +{ + PDev *pdev; + HSURF surf; + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, in_pdev)); + pdev = (PDev*)in_pdev; + if (!PrepareHardware(pdev)) { + return NULL; + } + + InitResources(pdev); + + DEBUG_PRINT((NULL, 1, "%s: EngCreateDeviceSurface(0x%lx, %ld:%ld, %lu)\n", + __FUNCTION__, + pdev, + pdev->resolution.cx, + pdev->resolution.cy, + pdev->bitmap_format)); + + if (!(surf = (HSURF)EngCreateDeviceSurface((DHSURF)pdev, pdev->resolution, + pdev->bitmap_format))) { + DEBUG_PRINT((NULL, 0, "%s: create device surface failed, 
0x%lx\n", + __FUNCTION__, pdev)); + goto err; + } + + DEBUG_PRINT((NULL, 1, "%s: EngModifySurface(0x%lx, 0x%lx, 0, MS_NOTSYSTEMMEMORY, " + "0x%lx, 0x%lx, %lu, NULL)\n", + __FUNCTION__, + surf, + pdev->eng, + pdev, + pdev->fb, + pdev->stride)); + + pdev->surf = surf; + if (!EngModifySurface(surf, pdev->eng, + HOOK_SYNCHRONIZE | HOOK_COPYBITS | HOOK_BITBLT | HOOK_TEXTOUT | + HOOK_STROKEPATH | HOOK_STRETCHBLT | HOOK_STRETCHBLTROP | + HOOK_TRANSPARENTBLT | HOOK_ALPHABLEND +#ifdef CALL_TEST + | HOOK_PLGBLT | HOOK_FILLPATH | HOOK_STROKEANDFILLPATH | HOOK_LINETO | + HOOK_GRADIENTFILL +#endif + , + MS_NOTSYSTEMMEMORY, + (DHSURF)pdev, + NULL, + 0, + NULL)) { + DEBUG_PRINT((NULL, 0, "%s: modify surface failed, 0x%lx\n", + __FUNCTION__, pdev)); + goto err; + } + DEBUG_PRINT((NULL, 1, "%s: 0x%lx exit\n", __FUNCTION__, pdev)); + return surf; + + err: + DrvDisableSurface((DHPDEV)pdev); + DEBUG_PRINT((NULL, 0, "%s: 0x%lx err\n", __FUNCTION__, pdev)); + return NULL; +} + +VOID DrvDisableSurface(DHPDEV in_pdev) +{ + PDev *pdev = (PDev*)in_pdev; + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + + if (pdev->surf) { + EngDeleteSurface(pdev->surf); + pdev->surf = NULL; + } + + if (pdev->draw_surf) { + EngUnlockSurface(pdev->draw_surf); + pdev->draw_surf = NULL; + EngDeleteSurface(pdev->draw_bitmap); + pdev->draw_bitmap = NULL; + } + + if (pdev->fb) { + VIDEO_MEMORY video_mem; + DWORD length; + + video_mem.RequestedVirtualAddress = pdev->fb; + pdev->fb = 0; + pdev->fb_size = 0; + if (EngDeviceIoControl(pdev->driver, + IOCTL_VIDEO_UNMAP_VIDEO_MEMORY, + &video_mem, + sizeof(video_mem), + NULL, + 0, + &length)) { + DEBUG_PRINT((NULL, 0, "%s: unmpap failed, 0x%lx\n", __FUNCTION__, pdev)); + } + } + DEBUG_PRINT((NULL, 1, "%s: 0x%lx exit\n", __FUNCTION__, pdev)); +} + +BOOL DrvAssertMode(DHPDEV in_pdev, BOOL enable) +{ + PDev* pdev = (PDev*)in_pdev; + BOOL ret; + + DEBUG_PRINT((NULL, 1, "%s: 0x%lx\n", __FUNCTION__, pdev)); + if (enable) { + ret = SetHardwareMode(pdev); + InitResources(pdev); + } else { + DWORD length; + if (EngDeviceIoControl(pdev->driver, IOCTL_VIDEO_RESET_DEVICE, + NULL, 0, NULL, 0, &length)) { + DEBUG_PRINT((NULL, 0, "%s: reset failed 0x%lx\n", __FUNCTION__, pdev)); + ret = FALSE; + } else { + ret = TRUE; + } + } + DEBUG_PRINT((NULL, 1, "%s: 0x%lx exit %s\n", __FUNCTION__, pdev, ret ? 
"TRUE" : "FALSE")); + return ret; +} + +ULONG DrvGetModes(HANDLE driver, ULONG dev_modes_size, DEVMODEW *dev_modes) +{ + PVIDEO_MODE_INFORMATION video_modes; + PVIDEO_MODE_INFORMATION curr_video_mode; + DWORD mode_size; + DWORD output_size; + DWORD n_modes; + + DEBUG_PRINT((NULL, 1, "%s\n", __FUNCTION__)); + + n_modes = GetAvailableModes(driver, &video_modes, &mode_size); + + if (!n_modes) { + DEBUG_PRINT((NULL, 0, "%s: get available modes failed\n", __FUNCTION__)); + return 0; + } + + if (!dev_modes) { + DEBUG_PRINT((NULL, 1, "%s: query size\n", __FUNCTION__)); + output_size = n_modes * sizeof(DEVMODEW); + goto out; + } + + if (dev_modes_size < n_modes * sizeof(DEVMODEW)) { + DEBUG_PRINT((NULL, 0, "%s: buf to small\n", __FUNCTION__)); + output_size = 0; + goto out; + } + + output_size = 0; + curr_video_mode = video_modes; + do { + if (curr_video_mode->Length != 0) { + RtlZeroMemory(dev_modes, sizeof(DEVMODEW)); + ASSERT(NULL, sizeof(DEVICE_NAME) < sizeof(dev_modes->dmDeviceName)); + RtlCopyMemory(dev_modes->dmDeviceName, DEVICE_NAME, sizeof(DEVICE_NAME)); + dev_modes->dmSpecVersion = DM_SPECVERSION; + dev_modes->dmDriverVersion = DM_SPECVERSION; + dev_modes->dmSize = sizeof(DEVMODEW); + dev_modes->dmBitsPerPel = curr_video_mode->NumberOfPlanes * + curr_video_mode->BitsPerPlane; + dev_modes->dmPelsWidth = curr_video_mode->VisScreenWidth; + dev_modes->dmPelsHeight = curr_video_mode->VisScreenHeight; + dev_modes->dmDisplayFrequency = curr_video_mode->Frequency; + dev_modes->dmFields = DM_BITSPERPEL | DM_PELSWIDTH | DM_PELSHEIGHT | + DM_DISPLAYFREQUENCY | DM_DISPLAYFLAGS; +#if (WINVER >= 0x0501) + dev_modes->dmDisplayOrientation = curr_video_mode->DriverSpecificAttributeFlags; + dev_modes->dmFields |= DM_DISPLAYORIENTATION; +#endif + + DEBUG_PRINT((NULL, 1, "%s: mode: w %u h %u bits %u frequency %u\n", + __FUNCTION__, + dev_modes->dmPelsWidth, + dev_modes->dmPelsHeight, + dev_modes->dmBitsPerPel, + dev_modes->dmDisplayFrequency)); +#if (WINVER >= 0x0501) + DEBUG_PRINT((NULL, 1, " orientation %u\n", + dev_modes->dmDisplayOrientation)); +#endif + output_size += sizeof(DEVMODEW); + dev_modes++; + } + curr_video_mode = (PVIDEO_MODE_INFORMATION)(((PUCHAR)curr_video_mode) + mode_size); + } while (--n_modes); + + out: + + EngFreeMem(video_modes); + DEBUG_PRINT((NULL, 1, "%s: exit %u\n", __FUNCTION__, output_size)); + return output_size; +} + +VOID DrvSynchronize(DHPDEV in_pdev, RECTL *ignored) +{ + PDev* pdev = (PDev*)in_pdev; + int notify; + + DEBUG_PRINT((pdev, 3, "%s: 0x%lx\n", __FUNCTION__, pdev)); + + DEBUG_PRINT((pdev, 4, "%s: 0x%lx done\n", __FUNCTION__, pdev)); +} + +char *BitmapFormatToStr(int format) +{ + switch (format) { + case BMF_1BPP: + return "BMF_1BPP"; + case BMF_4BPP: + return "BMF_4BPP"; + case BMF_8BPP: + return "BMF_8BPP"; + case BMF_16BPP: + return "BMF_16BPP"; + case BMF_24BPP: + return "BMF_24BPP"; + case BMF_32BPP: + return "BMF_32BPP"; + case BMF_4RLE: + return "BMF_4RLE"; + case BMF_8RLE: + return "BMF_8RLE"; + case BMF_JPEG: + return "BMF_JPEG"; + case BMF_PNG: + return "BMF_PNG"; + default: + return "?"; + } +} + +char *BitmapTypeToStr(int type) +{ + switch (type) { + case STYPE_BITMAP: + return "STYPE_BITMAP"; + case STYPE_DEVICE: + return "STYPE_DEVICE"; + case STYPE_DEVBITMAP: + return "STYPE_DEVBITMAP"; + default: + return "?"; + } +} + +#include "rop.h" +#include "utils.h" +#include "res.h" + +FIX FlotaToFixed(FLOATL val, FLOATL scale) +{ + FLOATOBJ float_obj; + FIX ret; + + FLOATOBJ_SetFloat(&float_obj, val); + FLOATOBJ_MulFloat(&float_obj, scale); + + ret = 
FLOATOBJ_GetLong(&float_obj) << 4; + FLOATOBJ_MulLong(&float_obj, 16); + ret |= (0x0f & FLOATOBJ_GetLong(&float_obj)); + return ret; +} + +static BOOL GetGeometricAttr(PDev *pdev, QXLDrawable *drawable, LineAttr *q_line_attr, + LINEATTRS *line_attr, XFORMOBJ *width_transform) +{ + ULONG save_buf_size; + FLOATOBJ float_obj; + PVOID fpu_buf; + XFORML xform; + + ASSERT(pdev, width_transform); + ASSERT(pdev, LINE_CAP_ROUND == ENDCAP_ROUND && LINE_CAP_SQUARE == ENDCAP_SQUARE && + LINE_CAP_BUTT == ENDCAP_BUTT && LINE_JOIN_ROUND == JOIN_ROUND && + LINE_JOIN_BEVEL == JOIN_BEVEL && LINE_JOIN_MITER == JOIN_MITER); + + + save_buf_size = EngSaveFloatingPointState(NULL, 0); + if (!(fpu_buf = EngAllocMem( +#if !(WINVER < 0x0501) + FL_NONPAGED_MEMORY | +#endif + FL_ZERO_MEMORY, + save_buf_size, + ALLOC_TAG))) { + DEBUG_PRINT((pdev, 0, "%s: alloc mem failed\n", __FUNCTION__)); + return FALSE; + } + + if (!EngSaveFloatingPointState(fpu_buf, save_buf_size)) { + DEBUG_PRINT((pdev, 0, "%s: save fpu state failed\n", __FUNCTION__)); + goto err1; + } + + if (XFORMOBJ_iGetXform(width_transform ,&xform) == DDI_ERROR) { + DEBUG_PRINT((pdev, 0, "%s: err get xform\n", __FUNCTION__)); + goto err2; + } + + if (xform.eM11 != xform.eM22 || xform.eM12 != 0 || xform.eM21 != 0) { + DEBUG_PRINT((pdev, 0, "%s: complex\n", __FUNCTION__)); + goto err2; + } + + q_line_attr->join_style = (UINT8)line_attr->iJoin; + q_line_attr->end_style = (UINT8)line_attr->iEndCap; + + FLOATOBJ_SetLong(&float_obj, 1); + q_line_attr->miter_limit = FlotaToFixed(line_attr->eMiterLimit, + FLOATOBJ_GetFloat(&float_obj)); + q_line_attr->width = FlotaToFixed(line_attr->elWidth.e, xform.eM11); + + if (line_attr->fl & LA_STYLED) { + FIX *style; + FIX *end; + PFLOAT_LONG src_style = line_attr->pstyle; + UINT32 nseg; + + ASSERT(pdev, LA_STYLED == LINE_STYLED); + ASSERT(pdev, LA_STARTGAP == LINE_START_WITH_GAP); + q_line_attr->flags = (UINT8)(line_attr->fl & (LA_STYLED | LA_STARTGAP)); + nseg = (line_attr->fl & LA_ALTERNATE) ? 
2 : line_attr->cstyle; + + if ( nseg > 100) { + goto err2; + } + + if (!(style = (FIX *)QXLGetBuf(pdev, drawable, &q_line_attr->style, + nseg * sizeof(UINT32)))) { + goto err2; + } + + if ((line_attr->fl & LA_ALTERNATE)) { + style[0] = style[1] = FlotaToFixed(FLOATOBJ_GetFloat(&float_obj), xform.eM11); + } else { + for ( end = style + nseg; style < end; style++, src_style++) { + *style = FlotaToFixed(src_style->e, xform.eM11); + } + } + q_line_attr->style_nseg = (UINT8)nseg; + } else { + q_line_attr->flags = 0; + drawable->u.stroke.attr.style_nseg = 0; + drawable->u.stroke.attr.style = 0; + } + EngRestoreFloatingPointState(fpu_buf); + EngFreeMem(fpu_buf); + return TRUE; + +err2: + EngRestoreFloatingPointState(fpu_buf); +err1: + EngFreeMem(fpu_buf); + return FALSE; +} + +static BOOL GetCosmeticAttr(PDev *pdev, QXLDrawable *drawable, LineAttr *q_line_attr, + LINEATTRS *line_attr) +{ + ASSERT(pdev, LINE_CAP_ROUND == ENDCAP_ROUND && LINE_CAP_SQUARE == ENDCAP_SQUARE && + LINE_CAP_BUTT == ENDCAP_BUTT && LINE_JOIN_ROUND == JOIN_ROUND && + LINE_JOIN_BEVEL == JOIN_BEVEL && LINE_JOIN_MITER == JOIN_MITER); + + q_line_attr->join_style = JOIN_MITER; + q_line_attr->end_style = ENDCAP_BUTT; + q_line_attr->width = 1 << 4; + q_line_attr->miter_limit = 0; + + if (line_attr->fl & LA_STYLED) { + PFLOAT_LONG src_style = line_attr->pstyle; + FIX *style; + FIX *end; + UINT32 nseg; + + ASSERT(pdev, LA_STYLED == LINE_STYLED); + ASSERT(pdev, LA_STARTGAP == LINE_START_WITH_GAP); + q_line_attr->flags = (UINT8)(line_attr->fl & (LA_STYLED | LA_STARTGAP)); + nseg = (line_attr->fl & LA_ALTERNATE) ? 2 : line_attr->cstyle; + if ( nseg > 100) { + return FALSE; + } + + if (!(style = (FIX *)QXLGetBuf(pdev, drawable, &q_line_attr->style, + nseg * sizeof(UINT32)))) { + return FALSE; + } + + if (line_attr->fl & LA_ALTERNATE) { + style[0] = style[1] = 1 << 4; + } else { + for ( end = style + nseg; style < end; style++, src_style++) { + *style = (*src_style).l << 4; + } + } + q_line_attr->style_nseg = (UINT8)nseg; + } else { + q_line_attr->flags = 0; + q_line_attr->style_nseg = 0; + q_line_attr->style = 0; + } + return TRUE; +} + +BOOL APIENTRY DrvStrokePath(SURFOBJ *surf, PATHOBJ *path, CLIPOBJ *clip, XFORMOBJ *width_transform, + BRUSHOBJ *brush, POINTL *brush_pos, LINEATTRS *line_attr, + MIX mix /*rop*/) +{ + QXLDrawable *drawable; + RECTFX fx_area; + RECTL area; + PDev *pdev; + ROP3Info *fore_rop; + ROP3Info *back_rop; + BOOL h_or_v_line; + + if (!(pdev = (PDev *)surf->dhpdev)) { + DEBUG_PRINT((NULL, 0, "%s: err no pdev\n", __FUNCTION__)); + return TRUE; + } + + CountCall(pdev, CALL_COUNTER_STROKE_PATH); + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + ASSERT(pdev, surf && path && line_attr && clip); + + + if (line_attr->fl & (LA_STYLED | LA_ALTERNATE | LA_GEOMETRIC)) { //for now + // punt back to GDI result in infinite recursion + //return EngStrokePath(surf, path, clip, width_transform, brush, brush_pos, line_attr, mix); + } + + if (line_attr->elWidth.l == 0 && (line_attr->fl & LA_GEOMETRIC)) { + DEBUG_PRINT((pdev, 1, "%s: width == 0\n", __FUNCTION__)); + return TRUE; + } + + PATHOBJ_vGetBounds(path, &fx_area); + FXToRect(&area, &fx_area); + + h_or_v_line = area.bottom == area.top + 1 || area.right == area.left + 1; + + if (clip) { + if (clip->iDComplexity == DC_TRIVIAL) { + clip = NULL; + } else { + SectRect(&clip->rclBounds, &area, &area); + if (IsEmptyRect(&area)) { + DEBUG_PRINT((pdev, 1, "%s: empty rect after clip\n", __FUNCTION__)); + return TRUE; + } + } + } + + if (!(drawable = Drawable(pdev, QXL_DRAW_STROKE, 
&area, clip))) { + return FALSE; + } + + fore_rop = &rops2[(mix - 1) & 0x0f]; + back_rop = &rops2[((mix >> 8) - 1) & 0x0f]; + + if (!((fore_rop->flags | back_rop->flags) & ROP3_BRUSH)) { + drawable->u.stroke.brush.type = BRUSH_TYPE_NONE; + } else if (!QXLGetBrush(pdev, drawable, &drawable->u.stroke.brush, brush, brush_pos)) { + goto err; + } + + if (!QXLGetPath(pdev, drawable, &drawable->u.stroke.path, path)) { + goto err; + } + // DrvStrokePath only draws foreground pixels, unless you support dotted + // lines, so you only care about the low-order byte. + drawable->u.stroke.fore_mode = fore_rop->method_data; + drawable->u.stroke.back_mode = back_rop->method_data; + + drawable->effect = (h_or_v_line) ? QXL_EFFECT_OPAQUE: QXL_EFFECT_BLEND; + + if (((line_attr->fl & LA_GEOMETRIC) ? + !GetGeometricAttr(pdev, drawable, &drawable->u.stroke.attr, line_attr, width_transform) : + !GetCosmeticAttr(pdev, drawable, &drawable->u.stroke.attr, line_attr))){ + goto err; + } + + if (drawable->u.stroke.attr.flags & LA_STYLED) { + drawable->effect = (fore_rop->effect == back_rop->effect) ? fore_rop->effect : + QXL_EFFECT_BLEND; + } else { + drawable->effect = fore_rop->effect; + } + + if (drawable->effect == QXL_EFFECT_OPAQUE && !h_or_v_line) { + drawable->effect = QXL_EFFECT_OPAQUE_BRUSH; + } + + PushDrawable(pdev, drawable); + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); + return TRUE; + +err: + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; +} + +#ifdef CALL_TEST + +void CountCall(PDev *pdev, int counter) +{ + if (pdev->count_calls) { + int i; + + pdev->call_counters[counter]++; + if((++pdev->total_calls % 500) == 0) { + DEBUG_PRINT((pdev, 0, "total eng calls is %u\n", pdev->total_calls)); + for (i = 0; i < NUM_CALL_COUNTERS; i++) { + DEBUG_PRINT((pdev, 0, "%s count is %u\n", + counters_info[i].name, pdev->call_counters[i])); + } + } + pdev->count_calls = FALSE; + } else if (counters_info[counter].effective) { + pdev->count_calls = TRUE; + } +} + +BOOL APIENTRY DrvFillPath( + SURFOBJ *pso, + PATHOBJ *ppo, + CLIPOBJ *pco, + BRUSHOBJ *pbo, + POINTL *pptlBrushOrg, + MIX mix, + FLONG flOptions) +{ + PDev *pdev; + + pdev = (PDev *)pso->dhpdev; + CountCall(pdev, CALL_COUNTER_FILL_PATH); + + return EngFillPath(pso, ppo, pco, pbo, pptlBrushOrg, mix, flOptions); +} + +BOOL APIENTRY DrvGradientFill( + SURFOBJ *psoDest, + CLIPOBJ *pco, + XLATEOBJ *pxlo, + TRIVERTEX *pVertex, + ULONG nVertex, + PVOID pMesh, + ULONG nMesh, + RECTL *prclExtents, + POINTL *pptlDitherOrg, + ULONG ulMode) +{ + PDev *pdev; + + pdev = (PDev *)psoDest->dhpdev; + CountCall(pdev, CALL_COUNTER_GRADIENT_FILL); + return EngGradientFill(psoDest, pco, pxlo, pVertex, nVertex, pMesh, nMesh, prclExtents, + pptlDitherOrg, ulMode); +} + +BOOL APIENTRY DrvLineTo( + SURFOBJ *pso, + CLIPOBJ *pco, + BRUSHOBJ *pbo, + LONG x1, + LONG y1, + LONG x2, + LONG y2, + RECTL *prclBounds, + MIX mix) +{ + PDev *pdev; + + pdev = (PDev *)pso->dhpdev; + CountCall(pdev, CALL_COUNTER_LINE_TO); + return EngLineTo(pso, pco, pbo, x1, y1, x2, y2, prclBounds, mix); +} + +BOOL APIENTRY DrvPlgBlt( + SURFOBJ *psoTrg, + SURFOBJ *psoSrc, + SURFOBJ *psoMsk, + CLIPOBJ *pco, + XLATEOBJ *pxlo, + COLORADJUSTMENT *pca, + POINTL *pptlBrushOrg, + POINTFIX *pptfx, + RECTL *prcl, + POINTL *pptl, + ULONG iMode) +{ + if (psoSrc->iType == STYPE_BITMAP) { + PDev *pdev; + + ASSERT(NULL, psoTrg && psoTrg->iType != STYPE_BITMAP && psoTrg->dhpdev); + pdev = (PDev *)psoTrg->dhpdev; + CountCall(pdev, CALL_COUNTER_PLG_BLT); + } + return EngPlgBlt(psoTrg, psoSrc, psoMsk, pco, 
pxlo, pca, pptlBrushOrg, pptfx, prcl, pptl, + iMode); +} + +BOOL APIENTRY DrvStrokeAndFillPath( + SURFOBJ *pso, + PATHOBJ *ppo, + CLIPOBJ *pco, + XFORMOBJ *pxo, + BRUSHOBJ *pboStroke, + LINEATTRS *plineattrs, + BRUSHOBJ *pboFill, + POINTL *pptlBrushOrg, + MIX mixFill, + FLONG flOptions) +{ + PDev *pdev = (PDev *)pso->dhpdev; + CountCall(pdev, CALL_COUNTER_STROKE_AND_FILL_PATH); + return EngStrokeAndFillPath(pso, ppo, pco, pxo, pboStroke, plineattrs, pboFill, pptlBrushOrg, + mixFill, flOptions); +} + +#endif + diff --git a/display/driver.rc b/display/driver.rc new file mode 100644 index 0000000..1e3821a --- /dev/null +++ b/display/driver.rc @@ -0,0 +1,29 @@ +#include <windows.h>
+
+#include <ntverp.h>
+
+#define VER_FILETYPE VFT_DRV
+#define VER_FILESUBTYPE VFT2_DRV_DISPLAY
+
+#undef VER_COMPANYNAME_STR
+#undef VER_FILEVERSION_STR
+#undef VER_LEGALCOPYRIGHT_STR
+#undef VER_LEGALCOPYRIGHT_YEARS
+#undef VER_PRODUCTNAME_STR
+#undef VER_PRODUCTVERSION_STR
+
+
+#define VER_FILEDESCRIPTION_STR "Red Hat QXL Display Driver"
+#define VER_INTERNALNAME_STR "qxldd.dll"
+#define VER_ORIGINALFILENAME_STR VER_INTERNALNAME_STR
+#define VER_FILEVERSION_STR "1.1.0.0"
+#define VER_PRODUCTNAME_STR "Spice"
+#define VER_PRODUCTVERSION_STR VER_FILEVERSION_STR
+
+#undef VER_PRODUCTVERSION
+#define VER_PRODUCTVERSION 1,1,0,0
+
+#define VER_COMPANYNAME_STR "Red Hat Inc."
+#define VER_LEGALCOPYRIGHT_STR "© Red Hat Inc. All rights reserved."
+
+#include "common.ver"
\ No newline at end of file
diff --git a/display/lookup3.c b/display/lookup3.c
new file mode 100644
index 0000000..cef7b84
--- /dev/null
+++ b/display/lookup3.c
@@ -0,0 +1,18 @@
+/*
+   Copyright (C) 2009 Red Hat, Inc.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of
+   the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "..\common\lookup3.c"
diff --git a/display/makefile b/display/makefile
new file mode 100644
index 0000000..f719f40
--- /dev/null
+++ b/display/makefile
@@ -0,0 +1,2 @@
+!INCLUDE $(NTMAKEENV)\makefile.def
+
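Before the allocator diff itself, a note on how it is consumed: DrvEnableDriver() in display/driver.c above installs mspace_print()/mspace_abort() through mspace_set_print_func() and mspace_set_abort_func(), and device-memory heaps are then managed with dlmalloc's mspace interface. The sketch below uses the stock dlmalloc 2.8.3 names (create_mspace_with_base, mspace_malloc, mspace_free, destroy_mspace); the variant declared in display/mspace.h is not shown in this section and may extend these signatures (for example with a user_data argument), so treat this as an orientation sketch rather than the driver's exact code.

static void example_use_mspace(void *heap_base, size_t heap_size)
{
    mspace ms;
    void *buf;

    /* carve a private heap out of a driver-owned memory region,
       with locking disabled (USE_LOCKS is 0 in this build) */
    ms = create_mspace_with_base(heap_base, heap_size, 0);

    buf = mspace_malloc(ms, 4096);   /* allocate from that region */
    if (buf != NULL) {
        mspace_free(ms, buf);        /* return it to the same mspace */
    }

    destroy_mspace(ms);              /* release the bookkeeping */
}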
diff --git a/display/mspace.c b/display/mspace.c new file mode 100644 index 0000000..ec29fc6 --- /dev/null +++ b/display/mspace.c @@ -0,0 +1,2435 @@ +// based on dlmalloc from Doug Lea + + +// quote from the Doug Lea original file + /* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/licenses/publicdomain. Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + + * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + */ + + +#include <ntddk.h> + +#include "mspace.h" + +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ + +#define MALLOC_ALIGNMENT ((size_t)8U) +#define USE_LOCKS 0 +#define malloc_getpagesize ((size_t)4096U) +#define DEFAULT_GRANULARITY malloc_getpagesize +#define MAX_SIZE_T (~(size_t)0) +#define MALLOC_FAILURE_ACTION +#define MALLINFO_FIELD_TYPE size_t +#define FOOTERS 0 +#define INSECURE 0 +#define PROCEED_ON_ERROR 0 +#define DEBUG 0 +#define ABORT_ON_ASSERT_FAILURE 1 +#define ABORT(user_data) abort_func(user_data) +#define USE_BUILTIN_FFS 0 +#define USE_DEV_RANDOM 0 +#define PRINT(params) print_func params + + +#define MEMCPY(dest, src, n) RtlCopyMemory(dest, src, n) +#define MEMCLEAR(dest, n) RtlZeroMemory(dest, n) + + +#define M_GRANULARITY (-1) + +void default_abort_func(void *user_data) +{ + for (;;); +} + +void default_print_func(void *user_data, char *format, ...) +{ +} + +static mspace_abort_t abort_func = default_abort_func; +static mspace_print_t print_func = default_print_func; + +void mspace_set_abort_func(mspace_abort_t f) +{ + abort_func = f; +} + +void mspace_set_print_func(mspace_print_t f) +{ + print_func = f; +} + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. 
+*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + + +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; + +#endif /* NO_MALLINFO */ + + + +#ifdef DEBUG +#if ABORT_ON_ASSERT_FAILURE +#define assert(user_data, x) if(!(x)) ABORT(user_data) +#else /* ABORT_ON_ASSERT_FAILURE */ +#include <assert.h> +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#define assert(user_data, x) +#endif /* DEBUG */ + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some plaftorms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* --------------------------- Lock preliminaries ------------------------ */ + +#if USE_LOCKS + +/* + When locks are defined, there are up to two global locks: + + * If HAVE_MORECORE, morecore_mutex protects sequences of calls to + MORECORE. In many cases sys_alloc requires two calls, that should + not be interleaved with calls by other threads. This does not + protect against direct calls to MORECORE by other threads not + using this lock, so there is still code to cope the best we can on + interference. + + * magic_init_mutex ensures that mparams.magic and other + unique mparams values are initialized only once. +*/ + + +#define USE_LOCK_BIT (2U) +#else /* USE_LOCKS */ +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) +#endif /* USE_LOCKS */ + +#if USE_LOCKS +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MAGIC_INIT_LOCK() +#define RELEASE_MAGIC_INIT_LOCK() +#endif /* USE_LOCKS */ + + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. 
(See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. 
If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse. This redundancy enables usage checks within free and realloc, + and reduces indirection when freeing and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. The top chunk + doesn't update its trailing size field since there is no next + contiguous chunk that would have to index off it. However, + space is still allocated for it (TOP_FOOT_SIZE) to enable + separation or merging when space is extended. + + 3. Chunks allocated via mmap, which have the lowest-order bit + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set + PINUSE_BIT in their head fields. Because they are allocated + one-by-one, each must carry its own prev_foot field, which is + also used to hold the offset this chunk has within its mmapped + region, which is needed to preserve alignment. Each mmapped + chunk is trailed by the first two fields of a fake next-chunk + for sake of usage checks. + +*/ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk* fd; /* double links -- used only if free. 
*/ + struct malloc_chunk* bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#if FOOTERS +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use. If the chunk was obtained with mmap, the prev_foot field has + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the + mmapped region to the base of the chunk. +*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. 
*/ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p) CHUNK_OVERHEAD + +/* Return true if malloced space is not necessarily cleared */ +#define calloc_must_clear(p) (1) + + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. 
Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. +*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) 
When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. +*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ +}; + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). 
This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + flag_t mflags; + void *user_data; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. 
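+
+  For example, with the build-time settings above (malloc_getpagesize
+  of 4096 and DEFAULT_GRANULARITY equal to the page size),
+  init_mparams() leaves page_size = granularity = 4096; a later
+  change_mparam(M_GRANULARITY, 8192) is accepted because the value is
+  a power of two no smaller than the page size, while a value such as
+  6144 is rejected.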
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* The global malloc_state used for all non-"mspace" calls */ +//static struct malloc_state _gm_; +//#define gm (&_gm_) +//#define is_global(M) ((M) == &_gm_) +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + + + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +/* Ensure locks are initialized */ +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) + +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. + USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. 
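+
+  In this build both actions default to ABORT(m->user_data), i.e. the
+  callback installed with mspace_set_abort_func() (the default simply
+  spins).  A caller that wants diagnostics can, for instance, register
+  an abort routine that logs the failing mspace through its user_data
+  pointer before halting.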
+*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data) +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data) +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#if defined(__GNUC__) && defined(i386) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 
0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* index corresponding to given bit */ + +#if defined(__GNUC__) && defined(i386) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ + I = (bindex_t)J;\ +} + +#else /* GNUC */ +#if USE_BUILTIN_FFS +#define compute_bit2idx(X, I) I = ffs(X)-1 + +#else /* USE_BUILTIN_FFS */ +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* USE_BUILTIN_FFS */ +#endif /* GNUC */ + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + + +/* ----------------------- Runtime Check Support ------------------------- */ + +/* + For security, the main invariant is that malloc/free/etc never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used guarantee + that the mstate controlling malloc/free is intact. This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probablistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. 
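+
+  Note that this file is built with FOOTERS and INSECURE both 0 (see
+  the defines at the top), so only the address, ordering and inuse-bit
+  checks below are active; the footer/magic verification described
+  above is compiled out and ok_magic() is a constant 1.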
+*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has its cinuse bit on */ +#define ok_cinuse(p) cinuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_cinuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +/* Initialize mparams */ +static int init_mparams(void) { + if (mparams.page_size == 0) { + size_t s; + + mparams.default_mflags = USE_LOCK_BIT; + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + s = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ + s = (size_t)(time(0) ^ (size_t)0x55555555U); + + s |= (size_t)8U; /* ensure nonzero */ + s &= ~(size_t)7U; /* improve chances of fault for bad values */ + + } +#else /* (FOOTERS && !INSECURE) */ + s = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + ACQUIRE_MAGIC_INIT_LOCK(); + if (mparams.magic == 0) { + mparams.magic = s; + /* Set up lock for main malloc area */ + 
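+      /* the upstream global-state (gm) setup is disabled in this
+         mspace-only build; see the commented-out _gm_ above */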
//INITIAL_LOCK(&gm->mutex); + //gm->mflags = mparams.default_mflags; + } + RELEASE_MAGIC_INIT_LOCK(); + + + mparams.page_size = malloc_getpagesize; + mparams.granularity = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : mparams.page_size); + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. + */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) + ABORT(NULL); + } + return 0; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (size_t)value; + init_mparams(); + switch(param_number) { + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(m->user_data, ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = chunksize(p); + assert(m->user_data, sp != 0); + assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(m->user_data, ok_address(m, p)); + assert(m->user_data, sz == m->topsize); + assert(m->user_data, sz > 0); + assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(m->user_data, pinuse(p)); + assert(m->user_data, !next_pinuse(p)); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(m->user_data, cinuse(p)); + assert(m->user_data, next_pinuse(p)); + /* If not pinuse, previous chunk has OK offset */ + assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(m->user_data, !cinuse(p)); + assert(m->user_data, !next_pinuse(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0); + assert(m->user_data, is_aligned(chunk2mem(p))); + assert(m->user_data, next->prev_foot == sz); + assert(m->user_data, pinuse(p)); + assert(m->user_data, next == m->top || cinuse(next)); + assert(m->user_data, p->fd->bk == p); + assert(m->user_data, p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(m->user_data, sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, p); + 
assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0); + assert(m->user_data, sz >= MIN_CHUNK_SIZE); + assert(m->user_data, sz >= s); + /* size is less than MIN_CHUNK_SIZE more than request */ + assert(m->user_data, sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. */ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(m->user_data, tindex == idx); + assert(m->user_data, tsize >= MIN_LARGE_SIZE); + assert(m->user_data, tsize >= minsize_for_tree_index(idx)); + assert(m->user_data, (idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(m->user_data, u->index == tindex); + assert(m->user_data, chunksize(u) == tsize); + assert(m->user_data, !cinuse(u)); + assert(m->user_data, !next_pinuse(u)); + assert(m->user_data, u->fd->bk == u); + assert(m->user_data, u->bk->fd == u); + if (u->parent == 0) { + assert(m->user_data, u->child[0] == 0); + assert(m->user_data, u->child[1] == 0); + } + else { + assert(m->user_data, head == 0); /* only one node on chain has parent */ + head = u; + assert(m->user_data, u->parent != u); + assert(m->user_data, u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(m->user_data, u->child[0]->parent == u); + assert(m->user_data, u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(m->user_data, u->child[1]->parent == u); + assert(m->user_data, u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(m->user_data, chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(m->user_data, head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(m->user_data, empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(m->user_data, empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(m->user_data, small_index(size) == i); + assert(m->user_data, p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(m->user_data, pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(m->user_data, !bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(m->user_data, q == m->dv || bin_find(m, q)); + assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->user_data, m->dvsize == chunksize(m->dv)); + assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE); + assert(m->user_data, bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + assert(m->user_data, m->topsize == chunksize(m->top)); + assert(m->user_data, m->topsize > 0); + assert(m->user_data, bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(m->user_data, total <= m->footprint); + assert(m->user_data, m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + 
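+    /* 'used' below is refined to the footprint minus the top chunk
+       (and its foot overhead), minus every free chunk found while
+       walking the segments */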
check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned long)(maxfp))); + PRINT((m->user_data, "system bytes = %10lu\n", (unsigned long)(fp))); + PRINT((m->user_data, "in use bytes = %10lu\n", (unsigned long)(used))); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert((M)->user_data, S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert((M)->user_data, P != B);\ + assert((M)->user_data, P != F);\ + assert((M)->user_data, chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert((M)->user_data, P != B);\ + assert((M)->user_data, P != F);\ + assert((M)->user_data, chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert((M)->user_data, is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && 
ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendents + correspond properly to bit masks. We use the rightmost descendent + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F))) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); + + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + 
bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. */ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert(m->user_data, (char*)oldfirst > (char*)q); + assert(m->user_data, pinuse(oldfirst)); + assert(m->user_data, qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if 
(RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(m->user_data, chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(m->user_data, chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + MEMCPY(newmem, oldmem, (oc < bytes)? 
oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. + */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? + br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + + p = newp; + } + + assert(m->user_data, chunksize(p) >= nb); + assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ----------------------------- user mspaces ---------------------------- */ + +static mstate init_user_mstate(char* tbase, size_t tsize, void *user_data) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + MEMCLEAR(m, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->mflags = mparams.default_mflags; + m->user_data = user_data; + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked, void *user_data) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity, user_data); + set_lock(m, locked); + } + return (mspace)m; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. 
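+
+  A minimal usage sketch (names such as region, region_size and my_context are
+  placeholders for the caller's own values, not symbols from this driver):
+
+      mspace ms = create_mspace_with_base(region, region_size, 0, my_context);
+      void *p = mspace_malloc(ms, 100);
+      mspace_free(ms, p);
+
+  Each mspace_* entry point below first validates the magic number of the
+  passed-in state (ok_magic) before doing any real work.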
+*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(ms->user_data, chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(ms->user_data, chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + MEMCLEAR(mem, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if 
(oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + diff --git a/display/mspace.h b/display/mspace.h new file mode 100644 index 0000000..96b0593 --- /dev/null +++ b/display/mspace.h @@ -0,0 +1,150 @@ +#ifndef _H_MSPACE +#define _H_MSPACE + +#define NO_MALLINFO 0 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +//typedef unsigned long size_t; +typedef void (*mspace_abort_t)(void *user_data); +typedef void (*mspace_print_t)(void *user_data, char *format, ...); + +void mspace_set_abort_func(mspace_abort_t f); +void mspace_set_print_func(mspace_print_t f); + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +//mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +//size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. 
+ Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked, void *user_data); + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. +*/ +void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +//void** mspace_independent_calloc(mspace msp, size_t n_elements, +// size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +//void** mspace_independent_comalloc(mspace msp, size_t n_elements, +// size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. +*/ +//int mspace_trim(mspace msp, size_t pad); + +/* + An alias for mallopt. +*/ +int mspace_mallopt(int, int); + +#ifdef __cplusplus +}; /* end of extern "C" */ +#endif /* __cplusplus */ + +#endif diff --git a/display/pointer.c b/display/pointer.c new file mode 100644 index 0000000..270619b --- /dev/null +++ b/display/pointer.c @@ -0,0 +1,140 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include "os_dep.h" +#include "qxldd.h" +#include "utils.h" +#include "res.h" + +/* DDK quote + +Calls to the pointer functions are serialized by GDI. This means two different threads in the +driver cannot execute the pointer functions simultaneously. + +*/ + +ULONG APIENTRY DrvSetPointerShape(SURFOBJ *surf, SURFOBJ *mask, SURFOBJ *color_pointer, + XLATEOBJ *color_trans, LONG hot_x, LONG hot_y, + LONG pos_x, LONG pos_y, RECTL *prcl, FLONG flags) +{ + QXLCursorCmd *cursor_cmd; + PDev *pdev; + + if (!(pdev = (PDev *)surf->dhpdev)) { + DEBUG_PRINT((NULL, 0, "%s: err no pdev\n", __FUNCTION__)); + return SPS_ERROR; + } + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + + if (flags & SPS_CHANGE) { + BOOL ok; + cursor_cmd = CursorCmd(pdev); + cursor_cmd->type = QXL_CURSOR_SET; + if (flags & SPS_ALPHA) { + if (mask) { + DEBUG_PRINT((pdev, 0, "%s: SPS_ALPHA and mask \n", __FUNCTION__)); + } + ASSERT(pdev, color_pointer); + ok = GetAlphaCursor(pdev, cursor_cmd, hot_x, hot_y, color_pointer); + } else if (!mask) { + ok = GetTransparentCursor(pdev, cursor_cmd); + } else if (color_pointer && color_pointer->iBitmapFormat != BMF_1BPP) { + ASSERT(pdev, mask); + ok = GetColorCursor(pdev, cursor_cmd, hot_x, hot_y, color_pointer, mask, color_trans); + } else { + ok = GetMonoCursor(pdev, cursor_cmd, hot_x, hot_y, mask); + } + + if (!ok) { + DEBUG_PRINT((pdev, 0, "%s: get cursor failed\n", __FUNCTION__)); + ReleaseOutput(pdev, cursor_cmd->release_info.id); + return SPS_ERROR; + } + if (pos_x < 0) { + cursor_cmd->u.set.visible = FALSE; + cursor_cmd->u.set.position.x = cursor_cmd->u.set.position.y = 0; + } else { + cursor_cmd->u.set.visible = TRUE; + cursor_cmd->u.set.position.x = (INT16)pos_x; + cursor_cmd->u.set.position.y = (INT16)pos_y; + } + + PushCursorCmd(pdev, cursor_cmd); + + } else { + cursor_cmd = CursorCmd(pdev); + if (pos_x < 0) { +#ifdef DBG + DEBUG_PRINT((pdev, 0, "%s: no SPS_CHANGE and pos_x < 0\n", __FUNCTION__)); +#endif + cursor_cmd->type = QXL_CURSOR_HIDE; + } else { +#ifdef DBG + DEBUG_PRINT((pdev, 0, "%s: no SPS_CHANGE\n", __FUNCTION__)); +#endif + cursor_cmd->type = QXL_CURSOR_MOVE; + cursor_cmd->u.position.x = (INT16)pos_x; + cursor_cmd->u.position.y = (INT16)pos_y; + } + PushCursorCmd(pdev, cursor_cmd); + } +#if (WINVER >= 0x0501) + if ((flags & (SPS_LENGTHMASK | SPS_FREQMASK)) != pdev->cursor_trail){ + pdev->cursor_trail = (flags & (SPS_LENGTHMASK | SPS_FREQMASK)); + cursor_cmd = CursorCmd(pdev); + cursor_cmd->type = QXL_CURSOR_TRAIL; + cursor_cmd->u.trail.length = (UINT16)((flags & SPS_LENGTHMASK) >> 8); + cursor_cmd->u.trail.frequency = (UINT16)((flags & SPS_FREQMASK) >> 12); + PushCursorCmd(pdev, cursor_cmd); + } +#endif + + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); + return SPS_ACCEPT_NOEXCLUDE; +} + +VOID APIENTRY DrvMovePointer(SURFOBJ *surf, LONG pos_x, LONG pos_y, RECTL *area) +{ + QXLCursorCmd *cursor_cmd; + PDev *pdev; + + if (!(pdev = (PDev *)surf->dhpdev)) { + DEBUG_PRINT((NULL, 0, "%s: err no pdev\n", __FUNCTION__)); + return; + } + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + + if (pos_y < 0 && pos_x >= 0) { + DEBUG_PRINT((pdev, 0, "%s: unexpected negative y pos\n", __FUNCTION__)); + return; + } + + cursor_cmd = CursorCmd(pdev); + if (pos_x < 0) { + cursor_cmd->type = QXL_CURSOR_HIDE; + } else { + cursor_cmd->type = QXL_CURSOR_MOVE; + cursor_cmd->u.position.x = (INT16)pos_x; + cursor_cmd->u.position.y = (INT16)pos_y; + } + PushCursorCmd(pdev, cursor_cmd); + + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); +} + diff --git a/display/quic.c 
b/display/quic.c new file mode 100644 index 0000000..848d6d9 --- /dev/null +++ b/display/quic.c @@ -0,0 +1,18 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. +*/ + +#include "..\common\quic.c" diff --git a/display/qxldd.h b/display/qxldd.h new file mode 100644 index 0000000..bcdcff2 --- /dev/null +++ b/display/qxldd.h @@ -0,0 +1,301 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. +*/ + +#ifndef _H_QXLDD +#define _H_QXLDD + + +#include "stddef.h" + +#include <stdarg.h> +#include <stdlib.h> +#include "windef.h" +#include "wingdi.h" +#include "winddi.h" +#include "qxl_driver.h" +#include "mspace.h" +#if (WINVER < 0x0501) +#include "wdmhelper.h" +#endif + +#define ALLOC_TAG 'dlxq' + +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1 << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define DEBUG_PRINT(arg) DebugPrint arg + +#ifdef DBG +#define ASSERT(pdev, x) if (!(x)) { \ + DebugPrint(pdev, 0, "ASSERT(%s) failed @ %s\n", #x, __FUNCTION__); \ + EngDebugBreak();\ +} +#define ONDBG(x) x +#else +#define ASSERT(pdev, x) +#define ONDBG(x) +#endif + +typedef enum { + QXL_SUCCESS, + QXL_FAILED, + QXL_UNSUPPORTED, +} QXLRESULT; + +typedef struct Ring RingItem; +typedef struct Ring { + RingItem *prev; + RingItem *next; +} Ring; + +#define IMAGE_HASH_SHIFT 15 +#define IMAGE_HASH_SIZE (1 << IMAGE_HASH_SHIFT) +#define IMAGE_HASH_MASK (IMAGE_HASH_SIZE - 1) + +#define IMAGE_POOL_SIZE (1 << 15) + +#define CURSOR_CACHE_SIZE (1 << 6) +#define CURSOR_HASH_SIZE (CURSOR_CACHE_SIZE << 1) +#define CURSOR_HASH_NASKE (CURSOR_HASH_SIZE - 1) + +#define PALETTE_CACHE_SIZE (1 << 6) +#define PALETTE_HASH_SIZE (PALETTE_CACHE_SIZE << 1) +#define PALETTE_HASH_NASKE (PALETTE_HASH_SIZE - 1) + +//#define CALL_TEST + +#ifdef CALL_TEST +enum { + CALL_COUNTER_COPY_BITS, + CALL_COUNTER_BIT_BLT, + CALL_COUNTER_TEXT_OUT, + CALL_COUNTER_STROKE_PATH, + CALL_COUNTER_STRETCH_BLT, + CALL_COUNTER_STRETCH_BLT_ROP, + CALL_COUNTER_TRANSPARENT_BLT, + CALL_COUNTER_ALPHA_BLEND, + + CALL_COUNTER_FILL_PATH, + CALL_COUNTER_GRADIENT_FILL, + CALL_COUNTER_LINE_TO, + CALL_COUNTER_PLG_BLT, + CALL_COUNTER_STROKE_AND_FILL_PATH, + + NUM_CALL_COUNTERS, +}; +#endif + +typedef struct QuicData QuicData; + +#define IMAGE_KEY_HASH_SIZE (1 << 15) +#define IMAGE_KEY_HASH_MASK (IMAGE_KEY_HASH_SIZE - 1) + +typedef struct ImageKey { + 
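+    /* One entry per hash bucket: the most recent (hsurf, unique) pair seen for the
+       bucket and the image key computed for it; written by ImageKeyPut() and looked
+       up by ImageKeyGet() in res.c. */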
HSURF hsurf; + UINT64 unique; + UINT32 key; +} ImageKey; + +typedef struct CacheImage { + struct CacheImage *next; + RingItem lru_link; + UINT32 key; + UINT32 hits; + UINT8 format; + UINT32 width; + UINT32 height; + struct InternalImage *image; +} CacheImage; + +#define NUM_UPDATE_TRACE_ITEMS 10 +typedef struct UpdateTrace { + RingItem link; + UINT32 last_time; + RECTL area; + HSURF hsurf; +} UpdateTrace; + +typedef struct PDev { + HANDLE driver; + HDEV eng; + HPALETTE palette; + HSURF surf; + DWORD video_mode_index; + SIZEL resolution; + UINT32 max_bitmap_size; + ULONG bitmap_format; + ULONG fb_size; + BYTE* fb; + ULONG stride; + FLONG red_mask; + FLONG green_mask; + FLONG blue_mask; + ULONG fp_state_size; + + QuicData *quic_data; + + QXLCommandRing *cmd_ring; + QXLCursorRing *cursor_ring; + QXLReleaseRing *release_ring; + UINT32 notify_cmd_port; + UINT32 notify_cursor_port; + UINT32 notify_oom_port; + PEVENT display_event; + PEVENT cursor_event; + PEVENT sleep_event; + + HSURF draw_bitmap; + SURFOBJ *draw_surf; + + UINT32 log_port; + UINT8 *log_buf; + UINT32 *log_level; + + HSEMAPHORE malloc_sem; + HSEMAPHORE print_sem; + + UINT32 num_io_pages; + UINT8 *io_pages_virt; + UINT64 io_pages_phys; + + mspace mspace; + UINT8 *mspace_start; + UINT8 *mspace_end; + + UINT64 free_outputs; + + UINT32 update_id; + UINT32 *dev_update_id; + + UINT32 update_area_port; + Rect *update_area; + + UINT32 *mm_clock; + + UINT32 *compression_level; + + ImageKey image_key_lookup[IMAGE_KEY_HASH_SIZE]; + CacheImage cache_image_pool[IMAGE_POOL_SIZE]; + Ring cache_image_lru; + struct CacheImage *image_cache[IMAGE_HASH_SIZE]; + + struct InternalCursor *cursor_cache[CURSOR_HASH_SIZE]; + Ring cursors_lru; + UINT32 num_cursors; + UINT32 last_cursor_id; + FLONG cursor_trail; + + struct InternalPalette *palette_cache[PALETTE_HASH_SIZE]; + Ring palette_lru; + UINT32 num_palettes; + + Ring update_trace; + UpdateTrace update_trace_items[NUM_UPDATE_TRACE_ITEMS]; + +#ifdef DBG + int num_free_pages; + int num_outputs; + int num_path_pages; + int num_rects_pages; + int num_bits_pages; + int num_buf_pages; + int num_glyphs_pages; + int num_cursor_pages; +#endif + +#ifdef CALL_TEST + BOOL count_calls; + UINT32 total_calls; + UINT32 call_counters[NUM_CALL_COUNTERS]; +#endif + +#if (WINVER < 0x0501) + PQXLWaitForEvent WaitForEvent; +#endif +} PDev; + + +void DebugPrintV(PDev *pdev, const char *message, va_list ap); +void DebugPrint(PDev *pdev, int level, const char *message, ...); + +void InitResources(PDev *pdev); + +#ifdef CALL_TEST +void CountCall(PDev *pdev, int counter); +#else +#define CountCall(a, b) +#endif + +char *BitmapFormatToStr(int format); +char *BitmapTypeToStr(int type); + +static _inline void RingInit(Ring *ring) +{ + ring->next = ring->prev = ring; +} + +static _inline void RingItemInit(RingItem *item) +{ + item->next = item->prev = NULL; +} + +static _inline BOOL RingItemIsLinked(RingItem *item) +{ + return !!item->next; +} + +static _inline BOOL RingIsEmpty(PDev *pdev, Ring *ring) +{ + ASSERT(pdev, ring->next != NULL && ring->prev != NULL); + return ring == ring->next; +} + +static _inline void RingAdd(PDev *pdev, Ring *ring, RingItem *item) +{ + ASSERT(pdev, ring->next != NULL && ring->prev != NULL); + ASSERT(pdev, item->next == NULL && item->prev == NULL); + + item->next = ring->next; + item->prev = ring; + ring->next = item->next->prev = item; +} + +static _inline void RingRemove(PDev *pdev, RingItem *item) +{ + ASSERT(pdev, item->next != NULL && item->prev != NULL); + ASSERT(pdev, item->next != item); + + 
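+    /* Unlink the item from its neighbours and clear both links so that
+       RingItemIsLinked() reports it as free again. */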
item->next->prev = item->prev; + item->prev->next = item->next; + item->prev = item->next = 0; +} + +static _inline RingItem *RingGetTail(PDev *pdev, Ring *ring) +{ + RingItem *ret; + + ASSERT(pdev, ring->next != NULL && ring->prev != NULL); + + if (RingIsEmpty(pdev, ring)) { + return NULL; + } + ret = ring->prev; + return ret; +} + +#endif diff --git a/display/res.c b/display/res.c new file mode 100644 index 0000000..13189be --- /dev/null +++ b/display/res.c @@ -0,0 +1,2433 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. +*/ + +#include "os_dep.h" +#include "res.h" +#include "ioaccess.h" +#include "utils.h" +#include "mspace.h" +#include "quic.h" +#include "lookup3.h" + +#if (WINVER < 0x0501) +#define WAIT_FOR_EVENT(pdev, event, timeout) (pdev)->WaitForEvent(event, timeout) +#else +#define WAIT_FOR_EVENT(pdev, event, timeout) EngWaitForSingleObject(event, timeout) +#endif + +#define PA(pdev, vaddr)\ + ((pdev)->io_pages_phys + ((UINT8*)(vaddr) - (pdev)->io_pages_virt)) + +#define VA(pdev, paddr)\ + ((pdev)->io_pages_virt + ((paddr) - (pdev)->io_pages_phys)) + +#define RELEASE_RES(pdev, res) if (!--(res)->refs) (res)->free(pdev, res); +#define GET_RES(res) (++(res)->refs) + +typedef struct Resource Resource; +struct Resource { + UINT32 refs; + void (*free)(PDev *pdev, Resource *res); + UINT8 res[0]; +}; + +static void FreeMem(PDev* pdev, void *ptr); +static BOOL SetClip(PDev *pdev, CLIPOBJ *clip, QXLDrawable *drawable); + + +#define PUSH_CMD(pdev) do { \ + int notify; \ + RING_PUSH(pdev->cmd_ring, notify); \ + if (notify) { \ + WRITE_PORT_UCHAR(pdev->notify_cmd_port, 0); \ + } \ +} while (0); + +#define PUSH_CURSOR_CMD(pdev) do { \ + int notify; \ + RING_PUSH(pdev->cursor_ring, notify); \ + if (notify) { \ + WRITE_PORT_UCHAR(pdev->notify_cursor_port, 0); \ + } \ +} while (0); + + +#define MAX_OUTPUT_RES 6 + +typedef struct QXLOutput { + UINT32 num_res; + Resource *resources[MAX_OUTPUT_RES]; + UINT8 data[0]; +} QXLOutput; + + +UINT64 ReleaseOutput(PDev *pdev, UINT64 output_id) +{ + QXLOutput *output = (QXLOutput *)output_id; + Resource **now; + Resource **end; + UINT64 next; + + ASSERT(pdev, output_id); + DEBUG_PRINT((pdev, 9, "%s 0x%x\n", __FUNCTION__, output)); + + for (now = output->resources, end = now + output->num_res; now < end; now++) { + RELEASE_RES(pdev, *now); + } + next = *(UINT64*)output->data; + FreeMem(pdev, output); + DEBUG_PRINT((pdev, 10, "%s done\n", __FUNCTION__)); + ONDBG(pdev->num_outputs--); //todo: atomic + return next; +} + +static void AddRes(PDev *pdev, QXLOutput *output, Resource *res) +{ + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + ASSERT(pdev, output->num_res < MAX_OUTPUT_RES); + res->refs++; + output->resources[output->num_res++] = res; + DEBUG_PRINT((pdev, 10, "%s: done\n", __FUNCTION__)); +} + +static _inline void DrawableAddRes(PDev *pdev, QXLDrawable *drawable, Resource *res) +{ + QXLOutput *output; + + output = (QXLOutput *)((UINT8 
*)drawable - sizeof(QXLOutput)); + AddRes(pdev, output, res); +} + +static _inline void CursorCmdAddRes(PDev *pdev, QXLCursorCmd *cmd, Resource *res) +{ + QXLOutput *output; + + output = (QXLOutput *)((UINT8 *)cmd - sizeof(QXLOutput)); + AddRes(pdev, output, res); +} + +static void WaitForCursorRing(PDev* pdev) +{ + int wait; + + DEBUG_PRINT((pdev, 9, "%s: 0x%lx\n", __FUNCTION__, pdev)); + + for (;;) { + RING_PROD_WAIT(pdev->cursor_ring, wait); + + if (!wait) { + break; + } +#ifdef DBG + { + LARGE_INTEGER timeout; // 1 => 100 nanoseconds + timeout.QuadPart = -1 * (1000 * 1000 * 10); //negative => relative // 1s +#if (WINVER < 0x0501) + pdev->WaitForEvent(pdev->cursor_event, &timeout); +#else + EngWaitForSingleObject(pdev->cursor_event, &timeout); +#endif // (WINVER < 0x0501) + + if (RING_IS_FULL(pdev->cursor_ring)) { + DEBUG_PRINT((pdev, 0, "%s: 0x%lx: timeout\n", __FUNCTION__, pdev)); + } + } +#else +#if (WINVER < 0x0501) + pdev->WaitForEvent(pdev->cursor_event, NULL); +#else + EngWaitForSingleObject(pdev->cursor_event, NULL); +#endif // (WINVER < 0x0501) +#endif //DBG + } +} + +static void WaitForCmdRing(PDev* pdev) +{ + int wait; + + DEBUG_PRINT((pdev, 9, "%s: 0x%lx\n", __FUNCTION__, pdev)); + + for (;;) { + RING_PROD_WAIT(pdev->cmd_ring, wait); + + if (!wait) { + break; + } +#ifdef DBG + { + LARGE_INTEGER timeout; // 1 => 100 nanoseconds + timeout.QuadPart = -1 * (1000 * 1000 * 10); //negative => relative // 1s +#if (WINVER < 0x0501) + pdev->WaitForEvent(pdev->display_event, &timeout); +#else + EngWaitForSingleObject(pdev->display_event, &timeout); +#endif // (WINVER < 0x0501) + + if (RING_IS_FULL(pdev->cmd_ring)) { + DEBUG_PRINT((pdev, 0, "%s: 0x%lx: timeout\n", __FUNCTION__, pdev)); + } + } +#else +#if (WINVER < 0x0501) + pdev->WaitForEvent(pdev->display_event, NULL); +#else + EngWaitForSingleObject(pdev->display_event, NULL); +#endif // (WINVER < 0x0501) +#endif //DBG + } +} + +static void QXLSleep(PDev* pdev, int msec) +{ + LARGE_INTEGER timeout; + + DEBUG_PRINT((pdev, 18, "%s: 0x%lx msec %u\n", __FUNCTION__, pdev, msec)); + timeout.QuadPart = -msec * 1000 * 10; + WAIT_FOR_EVENT(pdev, pdev->sleep_event, &timeout); + DEBUG_PRINT((pdev, 19, "%s: 0x%lx exit\n", __FUNCTION__, pdev)); +} + +static void WaitForReleaseRing(PDev* pdev) +{ + int wait; + + DEBUG_PRINT((pdev, 15, "%s: 0x%lx\n", __FUNCTION__, pdev)); + + for (;;) { + LARGE_INTEGER timeout; + + if (RING_IS_EMPTY(pdev->release_ring)) { + QXLSleep(pdev, 10); + if (!RING_IS_EMPTY(pdev->release_ring)) { + break; + } + WRITE_PORT_UCHAR(pdev->notify_oom_port, 0); + } + RING_CONS_WAIT(pdev->release_ring, wait); + + if (!wait) { + break; + } + + timeout.QuadPart = -30 * 1000 * 10; //30ms + WAIT_FOR_EVENT(pdev, pdev->display_event, &timeout); + + if (RING_IS_EMPTY(pdev->release_ring)) { +#ifdef DBG + DEBUG_PRINT((pdev, 0, "%s: 0x%lx: timeout\n", __FUNCTION__, pdev)); + DEBUG_PRINT((pdev, 0, "\tfree %d out %d path %d rect %d bits %d\n", + pdev->num_free_pages, + pdev->num_outputs, + pdev->num_path_pages, + pdev->num_rects_pages, + pdev->num_bits_pages, + pdev->num_buf_pages, + pdev->num_glyphs_pages, + pdev->num_cursor_pages)); +#endif + //oom + WRITE_PORT_UCHAR(pdev->notify_oom_port, 0); + } + } + DEBUG_PRINT((pdev, 16, "%s: 0x%lx, done\n", __FUNCTION__, pdev)); +} + +static void InitMspace(PDev *pdev) +{ + size_t capacity = pdev->num_io_pages * PAGE_SIZE; + pdev->mspace = create_mspace_with_base(pdev->io_pages_virt, capacity, 0, pdev); + pdev->mspace_start = pdev->io_pages_virt; + pdev->mspace_end = pdev->io_pages_virt + capacity; +} + 
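+/*
+  Allocation strategy (summary of the code below): AllocMem serves every device-memory
+  request from the single mspace that InitMspace builds over the io pages. When
+  mspace_malloc fails, the retry loop first releases one output from the chain already
+  fetched from the release ring (pdev->free_outputs, advanced by ReleaseOutput),
+  otherwise it blocks in WaitForReleaseRing, pops the next chain of completed outputs
+  and tries again. FreeMem returns memory to the same mspace.
+*/
+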
+static void *AllocMem(PDev* pdev, size_t size) +{ + UINT8 *ptr; + + ASSERT(pdev, pdev && pdev->mspace); + DEBUG_PRINT((pdev, 12, "%s: 0x%lx size %u\n", __FUNCTION__, pdev, size)); + + EngAcquireSemaphore(pdev->malloc_sem); + while (!(ptr = mspace_malloc(pdev->mspace, size))) { + int notify; + if (pdev->free_outputs) { + pdev->free_outputs = ReleaseOutput(pdev, pdev->free_outputs); + continue; + } + WaitForReleaseRing(pdev); + pdev->free_outputs = *RING_CONS_ITEM(pdev->release_ring); + RING_POP(pdev->release_ring, notify); + } + EngReleaseSemaphore(pdev->malloc_sem); + ASSERT(pdev, ptr >= pdev->mspace_start && ptr < pdev->mspace_end); + DEBUG_PRINT((pdev, 13, "%s: 0x%lx done 0x%x\n", __FUNCTION__, pdev, ptr)); + return ptr; +} + +static void FreeMem(PDev* pdev, void *ptr) +{ + ASSERT(pdev, pdev && pdev->mspace); + ASSERT(pdev, (UINT8 *)ptr >= pdev->mspace_start && (UINT8 *)ptr < pdev->mspace_end); + mspace_free(pdev->mspace, ptr); +} + +void InitResources(PDev *pdev) +{ + int i; + + pdev->free_outputs = 0; + InitMspace(pdev); + pdev->update_id = *pdev->dev_update_id; + + RtlZeroMemory(pdev->image_key_lookup, sizeof(pdev->image_key_lookup)); + RtlZeroMemory(pdev->cache_image_pool, sizeof(pdev->cache_image_pool)); + RingInit(&pdev->cache_image_lru); + for (i = 0; i < IMAGE_POOL_SIZE; i++) { + RingAdd(pdev, &pdev->cache_image_lru, &pdev->cache_image_pool[i].lru_link); + } + RtlZeroMemory(pdev->image_cache, sizeof(pdev->image_cache)); + + RtlZeroMemory(pdev->cursor_cache, sizeof(pdev->cursor_cache)); + RingInit(&pdev->cursors_lru); + pdev->num_cursors = 0; + pdev->last_cursor_id = 0; + + RtlZeroMemory(pdev->palette_cache, sizeof(pdev->palette_cache)); + RingInit(&pdev->palette_lru); + pdev->num_palettes = 0; + + RtlZeroMemory(pdev->update_trace_items, sizeof(pdev->update_trace_items)); + RingInit(&pdev->update_trace); + for (i = 0; i < NUM_UPDATE_TRACE_ITEMS; i++) { + RingAdd(pdev, &pdev->update_trace, &pdev->update_trace_items[i].link); + } + + ONDBG(pdev->num_outputs = 0); + ONDBG(pdev->num_path_pages = 0); + ONDBG(pdev->num_rects_pages = 0); + ONDBG(pdev->num_bits_pages = 0); + ONDBG(pdev->num_buf_pages = 0); + ONDBG(pdev->num_glyphs_pages = 0); + ONDBG(pdev->num_cursor_pages = 0); + +#ifdef CALL_TEST + pdev->count_calls = TRUE; + pdev->total_calls = 0; + for (i = 0; i < NUM_CALL_COUNTERS; i++) { + pdev->call_counters[i] = 0; + } +#endif +} + +static QXLDrawable *GetDrawable(PDev *pdev) +{ + QXLOutput *output; + + output = (QXLOutput *)AllocMem(pdev, sizeof(QXLOutput) + sizeof(QXLDrawable)); + output->num_res = 0; + ((QXLDrawable *)output->data)->release_info.id = (UINT64)output; + DEBUG_PRINT((pdev, 9, "%s 0x%x\n", __FUNCTION__, output)); + ONDBG(pdev->num_outputs++); //todo: atomic + return (QXLDrawable *)output->data; +} + +QXLDrawable *Drawable(PDev *pdev, UINT8 type, RECTL *area, CLIPOBJ *clip) +{ + QXLDrawable *drawable; + + ASSERT(pdev, pdev && area); + + drawable = GetDrawable(pdev); + drawable->type = type; + drawable->effect = QXL_EFFECT_BLEND; + drawable->bitmap_offset = 0; + drawable->mm_time = *pdev->mm_clock; + CopyRect(&drawable->bbox, area); + + if (!SetClip(pdev, clip, drawable)) { + DEBUG_PRINT((pdev, 0, "%s: set clip filed\n", __FUNCTION__)); + ReleaseOutput(pdev, drawable->release_info.id); + drawable = NULL; + } + return drawable; +} + +void PushDrawable(PDev *pdev, QXLDrawable *drawable) +{ + QXLCommand *cmd; + + WaitForCmdRing(pdev); + cmd = RING_PROD_ITEM(pdev->cmd_ring); + cmd->type = QXL_CMD_DRAW; + cmd->data = PA(pdev, drawable); + PUSH_CMD(pdev); +} + +static 
void FreePath(PDev *pdev, Resource *res) +{ + PHYSICAL chunk_phys; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + chunk_phys = ((QXLPath *)res->res)->chunk.next_chunk; + while (chunk_phys) { + QXLDataChunk *chunk = (QXLDataChunk *)VA(pdev, chunk_phys); + chunk_phys = chunk->next_chunk; + FreeMem(pdev, chunk); + ONDBG(pdev->num_path_pages--); + } + FreeMem(pdev, res); + ONDBG(pdev->num_path_pages--); + + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); +} + +#define NEW_DATA_CHUNK(page_counter, size) { \ + void *ptr = AllocMem(pdev, size + sizeof(QXLDataChunk)); \ + ONDBG((*(page_counter))++); \ + chunk->next_chunk = PA(pdev, ptr); \ + ((QXLDataChunk *)ptr)->prev_chunk = PA(pdev, chunk); \ + chunk = (QXLDataChunk *)ptr; \ + chunk->data_size = 0; \ + chunk->next_chunk = 0; \ + now = chunk->data; \ + end = now + size; \ +} + +#ifdef DBG +#define GetPathCommon __GetPathCommon +#else +#define GetPathCommon(pdev, path, chunk_ptr, now_ptr, end_ptr, data_size, page_counter)\ + __GetPathCommon(pdev, path, chunk_ptr, now_ptr, end_ptr, data_size, NULL) +#endif + +#define PATH_PREALLOC_PONTS 20 +#define PATH_MAX_ALLOC_PONTS 128 +#define PATH_ALLOC_SIZE (sizeof(Resource) + sizeof(QXLPath) + sizeof(PathSeg) +\ + sizeof(POINTFIX) * PATH_PREALLOC_PONTS) + + +static void __GetPathCommon(PDev *pdev, PATHOBJ *path, QXLDataChunk **chunk_ptr, UINT8 **now_ptr, + UINT8 **end_ptr, UINT32 *data_size, int *page_counter) +{ + QXLDataChunk *chunk = *chunk_ptr; + UINT8 *now = *now_ptr; + UINT8 *end = *end_ptr; + PATHDATA data; + int more; + + DEBUG_PRINT((pdev, 15, "%s\n", __FUNCTION__)); + PATHOBJ_vEnumStart(path); + + do { + int pt_buf_size; + UINT8 *pt_buf; + PathSeg *seg; + + more = PATHOBJ_bEnum(path, &data); + if (data.count == 0) { + break; + } + + if (end - now < sizeof(PathSeg)) { + size_t alloc_size = MIN(data.count << 3, sizeof(POINTFIX) * PATH_MAX_ALLOC_PONTS); + alloc_size += sizeof(PathSeg); + NEW_DATA_CHUNK(page_counter, alloc_size); + } + seg = (PathSeg*)now; + seg->flags = data.flags; + seg->count = data.count; + now = seg->data; + chunk->data_size += sizeof(*seg); + *data_size += sizeof(*seg); + pt_buf_size = data.count << 3; + pt_buf = (UINT8 *)data.pptfx; + + do { + int cp_size; + if (end == now ) { + size_t alloc_size = MIN(pt_buf_size, sizeof(POINTFIX) * PATH_MAX_ALLOC_PONTS); + NEW_DATA_CHUNK(page_counter, alloc_size); + } + + cp_size = MIN(end - now, pt_buf_size); + memcpy(now, pt_buf, cp_size); + chunk->data_size += cp_size; + *data_size += cp_size; + now += cp_size; + pt_buf += cp_size; + pt_buf_size -= cp_size; + } while (pt_buf_size); + } while (more); + + *chunk_ptr = chunk; + *now_ptr = now; + *end_ptr = end; + DEBUG_PRINT((pdev, 17, "%s: done\n", __FUNCTION__)); +} + +static Resource *__GetPath(PDev *pdev, PATHOBJ *path) +{ + Resource *res; + QXLPath *qxl_path; + QXLDataChunk *chunk; + PATHDATA data; + UINT8 *now; + UINT8 *end; + int more; + + ASSERT(pdev, QXL_PATH_BEGIN == PD_BEGINSUBPATH && QXL_PATH_END == PD_ENDSUBPATH && + QXL_PATH_CLOSE == PD_CLOSEFIGURE && QXL_PATH_BEZIER == PD_BEZIERS); + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + res = AllocMem(pdev, PATH_ALLOC_SIZE); + ONDBG(pdev->num_path_pages++); + res->refs = 1; + res->free = FreePath; + + qxl_path = (QXLPath *)res->res; + qxl_path->data_size = 0; + chunk = &qxl_path->chunk; + chunk->data_size = 0; + chunk->prev_chunk = 0; + chunk->next_chunk = 0; + + now = chunk->data; + end = (UINT8 *)res + PATH_ALLOC_SIZE; + GetPathCommon(pdev, path, &chunk, &now, &end, &qxl_path->data_size, &pdev->num_path_pages); + + 
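+    /* Path data that did not fit into the preallocated PATH_ALLOC_SIZE resource now
+       sits in additional QXLDataChunk pages chained through next_chunk; FreePath()
+       walks and frees that chain once the resource's reference count drops to zero. */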
DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); + return res; +} + +BOOL QXLGetPath(PDev *pdev, QXLDrawable *drawable, PHYSICAL *path_phys, PATHOBJ *path) +{ + Resource *path_res; + ASSERT(pdev, pdev && drawable && path_phys && path); + + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + + path_res = __GetPath(pdev, path); + *path_phys = PA(pdev, path_res->res); + DrawableAddRes(pdev, drawable, path_res); + RELEASE_RES(pdev, path_res); + return TRUE; +} + + +static void FreeClipRects(PDev *pdev, Resource *res) +{ + PHYSICAL chunk_phys; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + chunk_phys = ((QXLClipRects *)res->res)->chunk.next_chunk; + while (chunk_phys) { + QXLDataChunk *chunk = (QXLDataChunk *)VA(pdev, chunk_phys); + chunk_phys = chunk->next_chunk; + FreeMem(pdev, chunk); + ONDBG(pdev->num_rects_pages--); + } + FreeMem(pdev, res); + ONDBG(pdev->num_rects_pages--); + + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); +} + + +#define RECTS_NUM_PREALLOC 8 +#define RECTS_ALLOC_SIZE (sizeof(Resource) + sizeof(QXLClipRects) + \ + sizeof(Rect) * RECTS_NUM_PREALLOC) +#define RECTS_NUM_ALLOC 20 +#define RECTS_CHUNK_ALLOC_SIZE (sizeof(QXLDataChunk) + sizeof(Rect) * RECTS_NUM_ALLOC) + +static Resource *GetClipRects(PDev *pdev, CLIPOBJ *clip) +{ + Resource *res; + QXLClipRects *rects; + QXLDataChunk *chunk; + Rect *dest; + Rect *dest_end; + int more; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + res = (Resource *)AllocMem(pdev, RECTS_ALLOC_SIZE); + ONDBG(pdev->num_rects_pages++); + res->refs = 1; + res->free = FreeClipRects; + rects = (QXLClipRects *)res->res; + rects->num_rects = 0; + + chunk = &rects->chunk; + chunk->data_size = 0; + chunk->prev_chunk = 0; + chunk->next_chunk = 0; + + dest = (Rect *)chunk->data; + dest_end = dest + ((RECTS_ALLOC_SIZE - sizeof(Resource) - sizeof(QXLClipRects)) >> 4); + + CLIPOBJ_cEnumStart(clip, TRUE, CT_RECTANGLES, CD_RIGHTDOWN, 0); + do { + RECTL *now; + RECTL *end; + struct { + ULONG count; + RECTL rects[20]; + } buf; + + more = CLIPOBJ_bEnum(clip, sizeof(buf), (ULONG *)&buf); + rects->num_rects += buf.count; + for(now = buf.rects, end = now + buf.count; now < end; now++, dest++) { + if (dest == dest_end) { + void *page = AllocMem(pdev, RECTS_CHUNK_ALLOC_SIZE); + ONDBG(pdev->num_rects_pages++); + chunk->next_chunk = PA(pdev, page); + ((QXLDataChunk *)page)->prev_chunk = PA(pdev, chunk); + chunk = (QXLDataChunk *)page; + chunk->data_size = 0; + chunk->next_chunk = 0; + dest = (Rect *)chunk->data; + dest_end = dest + RECTS_NUM_ALLOC; + } + CopyRect(dest, now); + chunk->data_size += sizeof(Rect); + } + } while (more); + DEBUG_PRINT((pdev, 13, "%s: done, num_rects %d\n", __FUNCTION__, rects->num_rects)); + return res; +} + +static BOOL SetClip(PDev *pdev, CLIPOBJ *clip, QXLDrawable *drawable) +{ + Resource *rects_res; + + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + + if (clip == NULL) { + drawable->clip.type = CLIP_TYPE_NONE; + DEBUG_PRINT((pdev, 10, "%s: QXL_CLIP_TYPE_NONE\n", __FUNCTION__)); + return TRUE; + } + + if (clip->iDComplexity == DC_RECT) { + QXLClipRects *rects; + rects_res = (Resource *)AllocMem(pdev, sizeof(Resource) + sizeof(QXLClipRects) + + sizeof(Rect)); + rects_res->refs = 1; + rects_res->free = FreeClipRects; + rects = (QXLClipRects *)rects_res->res; + rects->num_rects = 1; + rects->chunk.data_size = sizeof(Rect); + rects->chunk.prev_chunk = 0; + rects->chunk.next_chunk = 0; + CopyRect((Rect *)rects->chunk.data, &clip->rclBounds); + } else { + + ASSERT(pdev, clip->iDComplexity == DC_COMPLEX); + if (clip->iMode == 
TC_PATHOBJ) { + Resource *path_res; + PATHOBJ *path = CLIPOBJ_ppoGetPath(clip); + if (!path) { + DEBUG_PRINT((pdev, 0, "%s: get path failed\n", __FUNCTION__)); + return FALSE; + } + + path_res = __GetPath(pdev, path); + EngDeletePath(path); + DrawableAddRes(pdev, drawable, path_res); + RELEASE_RES(pdev, path_res); + drawable->clip.type = CLIP_TYPE_PATH; + drawable->clip.data = PA(pdev, path_res->res); + DEBUG_PRINT((pdev, 10, "%s: done\n", __FUNCTION__)); + return TRUE; + } else { + ASSERT(pdev, clip->iMode == TC_RECTANGLES); + rects_res = GetClipRects(pdev, clip); + } + } + + DrawableAddRes(pdev, drawable, rects_res); + RELEASE_RES(pdev, rects_res); + drawable->clip.type = CLIP_TYPE_RECTS; + drawable->clip.data = PA(pdev, rects_res->res); + DEBUG_PRINT((pdev, 10, "%s: done\n", __FUNCTION__)); + return TRUE; +} + +#ifdef DBG +#define PutBytesAlign __PutBytesAlign +#define PutBytes(pdev, chunk, now, end, src, size, page_counter, alloc_size)\ + __PutBytesAlign(pdev, chunk, now, end, src, size, page_counter, alloc_size, 1) +#else +#define PutBytesAlign(pdev, chunk, now, end, src, size, page_counter, alloc_size, alignment)\ + __PutBytesAlign(pdev, chunk, now, end, src, size, NULL, alloc_size, alignment) +#define PutBytes(pdev, chunk, now, end, src, size, page_counter, alloc_size)\ + __PutBytesAlign(pdev, chunk, now, end, src, size, NULL, alloc_size, 1) +#endif + +#define BITS_BUF_MAX (64 * 1024) + +static void __PutBytesAlign(PDev *pdev, QXLDataChunk **chunk_ptr, UINT8 **now_ptr, + UINT8 **end_ptr, UINT8 *src, int size, int *page_counter, + size_t alloc_size, uint32_t alignment) +{ + QXLDataChunk *chunk = *chunk_ptr; + UINT8 *now = *now_ptr; + UINT8 *end = *end_ptr; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + while (size) { + int cp_size = MIN(end - now, size); + if (!cp_size) { + size_t aligned_size; + ASSERT(pdev, alloc_size > 0); + ASSERT(pdev, BITS_BUF_MAX > alignment); + aligned_size = MIN(alloc_size + alignment - 1, BITS_BUF_MAX); + aligned_size -= aligned_size % alignment; + NEW_DATA_CHUNK(page_counter, aligned_size); + cp_size = MIN(end - now, size); + } + RtlCopyMemory(now, src, cp_size); + src += cp_size; + now += cp_size; + chunk->data_size += cp_size; + size -= cp_size; + } + *chunk_ptr = chunk; + *now_ptr = now; + *end_ptr = end; + DEBUG_PRINT((pdev, 14, "%s: done\n", __FUNCTION__)); +} + +typedef struct InternalImage { + CacheImage *cache; + QXLImage image; +} InternalImage; + +#define HSURF_HASH_VAL(h) (((unsigned long)h >> 4) ^ ((unsigned long)(h) >> 8) ^ \ + ((unsigned long)(h) >> 16) ^ ((unsigned long)(h) >> 24)) + +#define IMAGE_KEY_HASH_VAL(hsurf) (HSURF_HASH_VAL(hsurf) & IMAGE_KEY_HASH_MASK) + +void ImageKeyPut(PDev *pdev, HSURF hsurf, UINT64 unique, UINT32 key) +{ + ImageKey *image_key = &pdev->image_key_lookup[IMAGE_KEY_HASH_VAL(hsurf)]; + + if (!unique) { + return; + } + image_key->hsurf = hsurf; + image_key->unique = unique; + image_key->key = key; +} + +BOOL ImageKeyGet(PDev *pdev, HSURF hsurf, UINT64 unique, UINT32 *key) +{ + ImageKey *image_key; + + if (!unique) { + return FALSE; + } + image_key = &pdev->image_key_lookup[IMAGE_KEY_HASH_VAL(hsurf)]; + if (image_key->hsurf == hsurf && image_key->unique == unique) { + *key = image_key->key; + return TRUE; + } + return FALSE; +} + +#define IMAGE_HASH_VAL(hsurf) (HSURF_HASH_VAL(hsurf) & IMAGE_HASH_MASK) + +static CacheImage *ImageCacheGetByKey(PDev *pdev, UINT32 key, BOOL check_rest, + UINT8 format, UINT32 width, UINT32 height) +{ + CacheImage *cache_image = pdev->image_cache[IMAGE_HASH_VAL(key)]; + + while 
(cache_image) { + if (cache_image->key == key && (!check_rest || (cache_image->format == format && + cache_image->width == width && cache_image->height == height))) { + cache_image->hits++; + return cache_image; + } + cache_image = cache_image->next; + } + return NULL; +} + +static void ImageCacheAdd(PDev *pdev, CacheImage *cache_image) +{ + int key = IMAGE_HASH_VAL(cache_image->key); + cache_image->next = pdev->image_cache[key]; + cache_image->hits = 1; + pdev->image_cache[key] = cache_image; +} + +static void ImageCacheRemove(PDev *pdev, CacheImage *cache_image) +{ + CacheImage **cache_img; + + if (!cache_image->hits) { + return; + } + cache_img = &pdev->image_cache[IMAGE_HASH_VAL(cache_image->key)]; + while (*cache_img) { + if ((*cache_img)->key == cache_image->key) { + *cache_img = cache_image->next; + return; + } + cache_img = &(*cache_img)->next; + } +} + +static CacheImage *AllocCacheImage(PDev* pdev) +{ + RingItem *item; + + while (!(item = RingGetTail(pdev, &pdev->cache_image_lru))) { + int notify; + if (pdev->free_outputs) { + pdev->free_outputs = ReleaseOutput(pdev, pdev->free_outputs); + continue; + } + WaitForReleaseRing(pdev); + pdev->free_outputs = *RING_CONS_ITEM(pdev->release_ring); + RING_POP(pdev->release_ring, notify); + } + RingRemove(pdev, item); + return CONTAINEROF(item, CacheImage, lru_link); +} + +#define IMAGE_HASH_INIT_VAL(width, height, format) \ + ((UINT32)((width) & 0x1FFF) | ((UINT32)((height) & 0x1FFF) << 13) |\ + ((UINT32)(format) << 26)) + +static _inline void SetImageId(InternalImage *internal, BOOL cache_me, LONG width, LONG height, + UINT8 format, UINT32 key) +{ + UINT32 image_info = IMAGE_HASH_INIT_VAL(width, height, format); + + if (cache_me) { + QXL_SET_IMAGE_ID(&internal->image, ((UINT32)QXL_IMAGE_GROUP_DRIVER << 30) | + image_info, key); + internal->image.descriptor.flags = QXL_IMAGE_CACHE; + } else { + QXL_SET_IMAGE_ID(&internal->image, ((UINT32)QXL_IMAGE_GROUP_DRIVER_DONT_CACHE << 30) | + image_info, key); + internal->image.descriptor.flags = 0; + } +} + +typedef struct InternalPalette { + UINT32 refs; + struct InternalPalette *next; + RingItem lru_link; + Palette palette; +} InternalPalette; + +#define PALETTE_HASH_VAL(unique) ((int)(unique) & PALETTE_HASH_NASKE) + +static _inline void ReleasePalette(PDev *pdev, InternalPalette *palette) +{ + ASSERT(pdev, palette); + DEBUG_PRINT((pdev, 15, "%s\n", __FUNCTION__)); + if (--palette->refs == 0) { + FreeMem(pdev, palette); + } +} + +static _inline void PaletteCacheRemove(PDev *pdev, InternalPalette *palette) +{ + InternalPalette **internal; + + DEBUG_PRINT((pdev, 15, "%s\n", __FUNCTION__)); + + ASSERT(pdev, palette->palette.unique); + internal = &pdev->palette_cache[PALETTE_HASH_VAL(palette->palette.unique)]; + + while (*internal) { + if ((*internal)->palette.unique == palette->palette.unique) { + *internal = palette->next; + RingRemove(pdev, &palette->lru_link); + ReleasePalette(pdev, palette); + pdev->num_palettes--; + DEBUG_PRINT((pdev, 16, "%s: done\n", __FUNCTION__)); + return; + } + internal = &(*internal)->next; + } + ASSERT(pdev, FALSE); +} + +static _inline InternalPalette *PaletteCacheGet(PDev *pdev, UINT32 unique) +{ + InternalPalette *now; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + if (!unique) { + return NULL; + } + + now = pdev->palette_cache[PALETTE_HASH_VAL(unique)]; + while (now) { + if (now->palette.unique == unique) { + RingRemove(pdev, &now->lru_link); + RingAdd(pdev, &pdev->palette_lru, &now->lru_link); + now->refs++; + DEBUG_PRINT((pdev, 13, "%s: found\n", 
__FUNCTION__)); + return now; + } + now = now->next; + } + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); + return NULL; +} + +static _inline void PaletteCacheAdd(PDev *pdev, InternalPalette *palette) +{ + int key; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + if (!palette->palette.unique) { + DEBUG_PRINT((pdev, 13, "%s: not unique\n", __FUNCTION__)); + return; + } + + if (pdev->num_palettes == PALETTE_CACHE_SIZE) { + ASSERT(pdev, RingGetTail(pdev, &pdev->palette_lru)); + PaletteCacheRemove(pdev, CONTAINEROF(RingGetTail(pdev, &pdev->palette_lru), + InternalPalette, lru_link)); + } + + key = PALETTE_HASH_VAL(palette->palette.unique); + palette->next = pdev->palette_cache[key]; + pdev->palette_cache[key] = palette; + + RingAdd(pdev, &pdev->palette_lru, &palette->lru_link); + palette->refs++; + pdev->num_palettes++; + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); +} + + +static _inline void GetPallette(PDev *pdev, Bitmap *bitmap, XLATEOBJ *color_trans) +{ + InternalPalette *internal; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + if (!color_trans || !(color_trans->flXlate & XO_TABLE)) { + bitmap->palette = 0; + return; + } + + if ((internal = PaletteCacheGet(pdev, color_trans->iUniq))) { + DEBUG_PRINT((pdev, 12, "%s: from cache\n", __FUNCTION__)); + bitmap->palette = PA(pdev, &internal->palette); + return; + } + + internal = (InternalPalette *)AllocMem(pdev, sizeof(InternalPalette) + + (color_trans->cEntries << 2)); + internal->refs = 1; + RingItemInit(&internal->lru_link); + bitmap->palette = PA(pdev, &internal->palette); + internal->palette.unique = color_trans->iUniq; + internal->palette.num_ents = (UINT16)color_trans->cEntries; + + RtlCopyMemory(internal->palette.ents, color_trans->pulXlate, color_trans->cEntries << 2); + PaletteCacheAdd(pdev, internal); + DEBUG_PRINT((pdev, 12, "%s: done\n", __FUNCTION__)); +} + +static void FreeQuicImage(PDev *pdev, Resource *res) // todo: defer +{ + InternalImage *internal; + PHYSICAL chunk_phys; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + internal = (InternalImage *)res->res; + if (internal->cache) { + RingAdd(pdev, &pdev->cache_image_lru, &internal->cache->lru_link); + internal->cache->image = NULL; + } + + chunk_phys = ((QXLDataChunk *)internal->image.quic.data)->next_chunk; + while (chunk_phys) { + QXLDataChunk *chunk = (QXLDataChunk *)VA(pdev, chunk_phys); + chunk_phys = chunk->next_chunk; + FreeMem(pdev, chunk); + ONDBG(pdev->num_bits_pages--); + + } + FreeMem(pdev, res); + ONDBG(pdev->num_bits_pages--); + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); +} + +static _inline QuicImageType GetQuicImageType(UINT8 format) +{ + switch (format) { + case BITMAP_FMT_32BIT: + return QUIC_IMAGE_TYPE_RGB32; + case BITMAP_FMT_16BIT: + return QUIC_IMAGE_TYPE_RGB16; + case BITMAP_FMT_RGBA: + return QUIC_IMAGE_TYPE_RGBA; + case BITMAP_FMT_24BIT: + return QUIC_IMAGE_TYPE_RGB24; + default: + return QUIC_IMAGE_TYPE_INVALID; + }; +} + +#define QUIC_ALLOC_BASE (sizeof(Resource) + sizeof(InternalImage) + sizeof(QXLDataChunk)) +#define QUIC_BUF_MAX (64 * 1024) +#define QUIC_BUF_MIN 1024 + +struct QuicData { + QuicUsrContext user; + PDev *pdev; + QuicContext *quic; + QXLDataChunk *chunk; + int chunk_io_words; + int prev_chunks_io_words; + int rows; + int raw_row_size; +}; + +static int quic_usr_more_space(QuicUsrContext *usr, uint32_t **io_ptr, int rows_completed) +{ + QuicData *usr_data = (QuicData *)usr; + PDev *pdev = usr_data->pdev; + QXLDataChunk *new_chank; + int alloc_size; + int more; + + ASSERT(pdev, usr_data->rows 
>= rows_completed); + more = (rows_completed - usr_data->rows) * usr_data->raw_row_size; + + alloc_size = MIN(MAX(more >> 4, QUIC_BUF_MIN), QUIC_BUF_MAX); + new_chank = AllocMem(pdev, sizeof(QXLDataChunk) + alloc_size); + new_chank->data_size = 0; + new_chank->prev_chunk = PA(pdev, usr_data->chunk); + new_chank->next_chunk = 0; + + usr_data->prev_chunks_io_words += usr_data->chunk_io_words; + usr_data->chunk->data_size = usr_data->chunk_io_words << 2; + usr_data->chunk->next_chunk = PA(pdev, new_chank); + usr_data->chunk = new_chank; + + usr_data->chunk_io_words = alloc_size >> 2; + + ONDBG(pdev->num_bits_pages++); + + *io_ptr = (UINT32 *)new_chank->data; + return usr_data->chunk_io_words; +} + +static int quic_usr_more_lines(QuicUsrContext *usr, uint8_t **lines) +{ + return 0; +} + +static _inline Resource *GetQuicImage(PDev *pdev, SURFOBJ *surf, XLATEOBJ *color_trans, + BOOL cache_me, LONG width, LONG height, UINT8 format, + UINT8 *src, UINT32 line_size, UINT32 key) +{ + Resource *image_res; + InternalImage *internal; + QuicImageType type; + size_t alloc_size; + int data_size; + QuicData *quic_data; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev->quic_data); + + if (!*pdev->compression_level) { + return NULL; + } + + if ((type = GetQuicImageType(format)) == QUIC_IMAGE_TYPE_INVALID) { + DEBUG_PRINT((pdev, 13, "%s: unsupported\n", __FUNCTION__)); + return NULL; + } + + quic_data = pdev->quic_data; + + alloc_size = MIN(QUIC_ALLOC_BASE + (height * line_size >> 4), QUIC_ALLOC_BASE + QUIC_BUF_MAX); + alloc_size = MAX(alloc_size, QUIC_ALLOC_BASE + QUIC_BUF_MIN); + + image_res = AllocMem(pdev, alloc_size); + ONDBG(pdev->num_bits_pages++); + image_res->refs = 1; + image_res->free = FreeQuicImage; + + internal = (InternalImage *)image_res->res; + SetImageId(internal, cache_me, width, height, format, key); + internal->image.descriptor.type = IMAGE_TYPE_QUIC; + internal->image.descriptor.width = width; + internal->image.descriptor.height = height; + + quic_data->chunk = (QXLDataChunk *)internal->image.quic.data; + quic_data->chunk->data_size = 0; + quic_data->chunk->prev_chunk = 0; + quic_data->chunk->next_chunk = 0; + quic_data->prev_chunks_io_words = 0; + quic_data->chunk_io_words = ((UINT8 *)image_res + alloc_size - quic_data->chunk->data) >> 2; + quic_data->rows = height; + quic_data->raw_row_size = line_size; + + ASSERT(pdev, quic_data->chunk_io_words > 0); + data_size = quic_encode(quic_data->quic, type, width, height, src, height, surf->lDelta, + (UINT32 *)quic_data->chunk->data, quic_data->chunk_io_words); + if (data_size == QUIC_ERROR) { + FreeQuicImage(pdev, image_res); + DEBUG_PRINT((pdev, 13, "%s: error\n", __FUNCTION__)); + return NULL; + } + + quic_data->chunk->data_size = (data_size - quic_data->prev_chunks_io_words) << 2; + internal->image.quic.data_size = data_size << 2; + DEBUG_PRINT((pdev, 13, "%s: done. 
row size %u quic size %u \n", __FUNCTION__, + line_size * height, data_size << 2)); + return image_res; +} + +static void FreeBitmapImage(PDev *pdev, Resource *res) // todo: defer +{ + InternalImage *internal; + PHYSICAL chunk_phys; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + internal = (InternalImage *)res->res; + if (internal->cache) { + RingAdd(pdev, &pdev->cache_image_lru, &internal->cache->lru_link); + internal->cache->image = NULL; + } + + if (internal->image.bitmap.palette) { + Palette *palette = (Palette *)VA(pdev, internal->image.bitmap.palette); + ReleasePalette(pdev, CONTAINEROF(palette, InternalPalette, palette)); + } + + chunk_phys = ((QXLDataChunk *)(&internal->image.bitmap + 1))->next_chunk; + while (chunk_phys) { + QXLDataChunk *chunk = (QXLDataChunk *)VA(pdev, chunk_phys); + chunk_phys = chunk->next_chunk; + FreeMem(pdev, chunk); + ONDBG(pdev->num_bits_pages--); + + } + + FreeMem(pdev, res); + ONDBG(pdev->num_bits_pages--); + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); +} + +#define BITMAP_ALLOC_BASE (sizeof(Resource) + sizeof(InternalImage) + sizeof(QXLDataChunk)) + +static _inline Resource *GetBitmapImage(PDev *pdev, SURFOBJ *surf, XLATEOBJ *color_trans, + BOOL cache_me, LONG width, LONG height, UINT8 format, + UINT8 *src, UINT32 line_size, UINT32 key) +{ + Resource *image_res; + InternalImage *internal; + size_t alloc_size; + QXLDataChunk *chunk; + UINT8 *src_end; + UINT8 *dest; + UINT8 *dest_end; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + ASSERT(pdev, width > 0 && height > 0); + + ASSERT(pdev, BITS_BUF_MAX > line_size); + alloc_size = BITMAP_ALLOC_BASE + BITS_BUF_MAX - BITS_BUF_MAX % line_size; + alloc_size = MIN(BITMAP_ALLOC_BASE + height * line_size, alloc_size); + image_res = AllocMem(pdev, alloc_size); + ONDBG(pdev->num_bits_pages++); + + image_res->refs = 1; + image_res->free = FreeBitmapImage; + + internal = (InternalImage *)image_res->res; + SetImageId(internal, cache_me, width, height, format, key); + internal->image.descriptor.type = IMAGE_TYPE_BITMAP; + chunk = (QXLDataChunk *)(&internal->image.bitmap + 1); + chunk->data_size = 0; + chunk->prev_chunk = 0; + chunk->next_chunk = 0; + internal->image.bitmap.data = PA(pdev, chunk); + internal->image.bitmap.flags = 0; + internal->image.descriptor.width = internal->image.bitmap.x = width; + internal->image.descriptor.height = internal->image.bitmap.y = height; + internal->image.bitmap.format = format; + internal->image.bitmap.stride = line_size; + src_end = src - surf->lDelta; + src += surf->lDelta * (height - 1); + dest = chunk->data; + dest_end = (UINT8 *)image_res + alloc_size; + alloc_size = height * line_size; + for (; src != src_end; src -= surf->lDelta, alloc_size -= line_size) { + PutBytesAlign(pdev, &chunk, &dest, &dest_end, src, line_size, &pdev->num_bits_pages, + alloc_size, line_size); + } + + GetPallette(pdev, &internal->image.bitmap, color_trans); + DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__)); + return image_res; +} + +#define ADAPTIVE_HASH + +static _inline UINT32 GetHash(UINT8 *src, INT32 width, INT32 height, UINT8 format, + UINT32 line_size, LONG stride, XLATEOBJ *color_trans) +{ + UINT32 hash_value = IMAGE_HASH_INIT_VAL(width, height, format); + UINT8 *row_buf = src; + UINT8 last_byte = 0; + UINT8 reminder; + UINT32 i; + int row; + + if (color_trans && color_trans->flXlate == XO_TABLE) { + hash_value = hashlittle(color_trans->pulXlate, + sizeof(*color_trans->pulXlate) * color_trans->cEntries, hash_value); + } + for (row = 0; row < height; row++) { +#ifdef 
ADAPTIVE_HASH + if (format == BITMAP_FMT_32BIT) { + for (i = 0; i < line_size; i += 4) { + hash_value = hashlittle(row_buf + i, 3, hash_value); + } + } else { + if (format == BITMAP_FMT_4BIT_BE && (width & 0x1)) { + last_byte = row_buf[line_size - 1] & 0xF0; + } else if (format == BITMAP_FMT_1BIT_BE && (reminder = width & 0x7)) { + last_byte = row_buf[line_size - 1] & ~((1 << (8 - reminder)) - 1); + } + if (last_byte) { + hash_value = hashlittle(row_buf, line_size - 1, hash_value); + hash_value = hashlittle(&last_byte, 1, hash_value); + } else { + hash_value = hashlittle(row_buf, line_size, hash_value); + } + } +#else + hash_value = hashlittle(row_buf, line_size, hash_value); +#endif + row_buf += stride; + } + return hash_value; +} + +static _inline UINT32 GetFormatLineSize(INT32 width, ULONG bitmap_format, UINT8 *format) +{ + switch (bitmap_format) { + case BMF_32BPP: + *format = BITMAP_FMT_32BIT; + return width << 2; + case BMF_24BPP: + *format = BITMAP_FMT_24BIT; + return width * 3; + case BMF_16BPP: + *format = BITMAP_FMT_16BIT; + return width << 1; + case BMF_8BPP: + *format = BITMAP_FMT_8BIT; + return width; + case BMF_4BPP: + *format = BITMAP_FMT_4BIT_BE; + return ALIGN(width, 2) >> 1; + case BMF_1BPP: + *format = BITMAP_FMT_1BIT_BE; + return ALIGN(width, 8) >> 3; + default: + return 0; + } +} + +static BOOL ChachSizeTest(PDev *pdev, SURFOBJ *surf) +{ + BOOL ret = (UINT32)surf->sizlBitmap.cx * surf->sizlBitmap.cy <= pdev->max_bitmap_size; + if (!ret) { + DEBUG_PRINT((pdev, 1, "%s: cache size test failed x %d y %d max\n", + __FUNCTION__, + surf->sizlBitmap.cx, + surf->sizlBitmap.cy, + pdev->max_bitmap_size)); + } + return ret; +} + + +static CacheImage *GetChachImage(PDev *pdev, SURFOBJ *surf, XLATEOBJ *color_trans, UINT32 *hash_key) +{ + CacheImage *cache_image; + ULONG pallette_unique; + UINT64 gdi_unique; + int pallette; + UINT32 key; + UINT8 format; + UINT32 line_size; + + pallette = color_trans && (color_trans->flXlate & XO_TABLE); + pallette_unique = pallette ? color_trans->iUniq : 0; + + // NOTE: GDI sometimes gives many instances of the exactly same SURFOBJ (hsurf & iUniq), + // but with (fjBitmap & BMF_DONTCACHE). This opposed to what documented in the MSDN. 
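+    // Key selection below (summary of the logic that follows; the behaviour of ImageKeyGet on a
+    // zero stamp is an assumption): when the surface or its palette carries no trustworthy
+    // uniqueness stamp, gdi_unique is forced to 0, so the ImageKeyGet lookup presumably misses and
+    // the key is recomputed by hashing the pixel data; otherwise the surface and palette stamps
+    // are packed into a single 64-bit value for the ImageKeyGet/ImageKeyPut lookup.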
+ if (!surf->iUniq || (surf->fjBitmap & BMF_DONTCACHE) || (pallette && !pallette_unique)) { + gdi_unique = 0; + } else { + gdi_unique = surf->iUniq | ((UINT64)pallette_unique << 32); + } + + if (!(line_size = GetFormatLineSize(surf->sizlBitmap.cx, surf->iBitmapFormat, &format))) { + DEBUG_PRINT((pdev, 0, "%s: bitmap format err\n", __FUNCTION__)); + return FALSE; + } + + if (!ImageKeyGet(pdev, surf->hsurf, gdi_unique, &key)) { + key = GetHash(surf->pvScan0, surf->sizlBitmap.cx, surf->sizlBitmap.cy, format, + line_size, surf->lDelta, color_trans); + ImageKeyPut(pdev, surf->hsurf, gdi_unique, key); + DEBUG_PRINT((pdev, 11, "%s: ImageKeyPut %u\n", __FUNCTION__, key)); + } else { + DEBUG_PRINT((pdev, 11, "%s: ImageKeyGet %u\n", __FUNCTION__, key)); + } + + if (hash_key) { + *hash_key = key; + } + + if ((cache_image = ImageCacheGetByKey(pdev, key, TRUE, format, + surf->sizlBitmap.cx, + surf->sizlBitmap.cy))) { + DEBUG_PRINT((pdev, 11, "%s: ImageCacheGetByKey %u hits %u\n", __FUNCTION__, + key, cache_image->hits)); + return cache_image; + } + + if (ChachSizeTest(pdev, surf)) { + CacheImage *cache_image = AllocCacheImage(pdev); + ImageCacheRemove(pdev, cache_image); + cache_image->key = key; + cache_image->image = NULL; + cache_image->format = format; + cache_image->width = surf->sizlBitmap.cx; + cache_image->height = surf->sizlBitmap.cy; + ImageCacheAdd(pdev, cache_image); + RingAdd(pdev, &pdev->cache_image_lru, &cache_image->lru_link); + DEBUG_PRINT((pdev, 11, "%s: ImageCacheAdd %u\n", __FUNCTION__, key)); + } + return NULL; +} + +static HSEMAPHORE image_id_sem = NULL; + +static _inline UINT32 get_image_serial() +{ + static UINT32 image_id = 0; // move to dev mem and use InterlockedIncrement + UINT32 ret = 0; + + EngAcquireSemaphore(image_id_sem); + ret = ++image_id; + EngReleaseSemaphore(image_id_sem); + return ret; +} + +BOOL QXLGetBitmap(PDev *pdev, QXLDrawable *drawable, PHYSICAL *image_phys, SURFOBJ *surf, + Rect *area, XLATEOBJ *color_trans, UINT32 *hash_key, BOOL use_cache) +{ + Resource *image_res; + InternalImage *internal; + CacheImage *cache_image; + UINT32 key; + UINT8 format; + UINT32 line_size; + UINT8 *src; + INT32 width = area->right - area->left; + INT32 height = area->bottom - area->top; + + ASSERT(pdev, !hash_key || use_cache); + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + if (surf->iType != STYPE_BITMAP) { + DEBUG_PRINT((pdev, 0, "%s: copy from device doing nothing!!!\n", __FUNCTION__)); + return FALSE; + } + + if (area->left < 0 || area->right > surf->sizlBitmap.cx || + area->top < 0 || area->bottom > surf->sizlBitmap.cy) { + DEBUG_PRINT((pdev, 0, "%s: bad dimensions\n", __FUNCTION__)); + return FALSE; + } + + DEBUG_PRINT((pdev, 11, "%s: iUniq=%x DONTCACHE=%x w=%d h=%d cx=%d cy=%d " + "hsurf=%x ctiUniq=%x XO_TABLE=%u format=%u\n", __FUNCTION__, + surf->iUniq, surf->fjBitmap & BMF_DONTCACHE, width, height, + surf->sizlBitmap.cx, surf->sizlBitmap.cy, surf->hsurf, + color_trans ? color_trans->iUniq : 0, + color_trans ? 
!!(color_trans->flXlate & XO_TABLE) : 0, + surf->iBitmapFormat)); + + if (use_cache) { + cache_image = GetChachImage(pdev, surf, color_trans, hash_key); + if (cache_image && cache_image->image) { + DEBUG_PRINT((pdev, 11, "%s: cached image found %u\n", __FUNCTION__, cache_image->key)); + internal = cache_image->image; + *image_phys = PA(pdev, &internal->image); + image_res = (Resource *)((UINT8 *)internal - sizeof(Resource)); + DrawableAddRes(pdev, drawable, image_res); + return TRUE; + } + } else { + cache_image = NULL; + } + + if (cache_image) { + key = cache_image->key; + width = surf->sizlBitmap.cx; + height = surf->sizlBitmap.cy; + src = surf->pvScan0; + } else { + int scan0_offset; + int dx; + + key = get_image_serial(); + switch (surf->iBitmapFormat) { + case BMF_32BPP: + dx = 0; + scan0_offset = area->left << 2; + break; + case BMF_24BPP: + dx = 0; + scan0_offset = area->left * 3; + break; + case BMF_16BPP: + dx = 0; + scan0_offset = area->left << 1; + break; + case BMF_8BPP: + dx = 0; + scan0_offset = area->left; + break; + case BMF_4BPP: + dx = area->left & 0x01; + scan0_offset = (area->left & ~0x01) >> 1; + break; + case BMF_1BPP: + dx = area->left & 0x07; + scan0_offset = (area->left & ~0x07) >> 3; + break; + default: + DEBUG_PRINT((pdev, 0, "%s: bitmap format err\n", __FUNCTION__)); + return FALSE; + } + width = width + dx; + src = (UINT8 *)surf->pvScan0 + area->top * surf->lDelta + scan0_offset; + + area->left = dx; + area->right = width; + + area->top = 0; + area->bottom = height; + } + + if (!(line_size = GetFormatLineSize(width, surf->iBitmapFormat, &format))) { + DEBUG_PRINT((pdev, 0, "%s: bitmap format err\n", __FUNCTION__)); + return FALSE; + } + + if (!(image_res = GetQuicImage(pdev, surf, color_trans, !!cache_image, width, height, format, + src, line_size, key))) { + image_res = GetBitmapImage(pdev, surf, color_trans, !!cache_image, width, height, format, + src, line_size, key); + } + internal = (InternalImage *)image_res->res; + if ((internal->cache = cache_image)) { + DEBUG_PRINT((pdev, 11, "%s: cache_me %u\n", __FUNCTION__, key)); + cache_image->image = internal; + if (RingItemIsLinked(&cache_image->lru_link)) { + RingRemove(pdev, &cache_image->lru_link); + } + } + *image_phys = PA(pdev, &internal->image); + DrawableAddRes(pdev, drawable, image_res); + RELEASE_RES(pdev, image_res); + return TRUE; +} + +BOOL QXLGetAlphaBitmap(PDev *pdev, QXLDrawable *drawable, PHYSICAL *image_phys, + SURFOBJ *surf, Rect *area) +{ + Resource *image_res; + InternalImage *internal; + CacheImage *cache_image; + UINT64 gdi_unique; + UINT32 key; + UINT8 *src; + INT32 width = area->right - area->left; + INT32 height = area->bottom - area->top; + + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + ASSERT(pdev, surf->iBitmapFormat == BMF_32BPP && surf->iType == STYPE_BITMAP); + ASSERT(pdev, area->left >= 0 && area->right <= surf->sizlBitmap.cx && + area->top >= 0 && area->bottom <= surf->sizlBitmap.cy); + + DEBUG_PRINT((pdev, 11, "%s: iUniq=%x DONTCACHE=%x w=%d h=%d cx=%d cy=%d " + "hsurf=%x format=%u\n", __FUNCTION__, surf->iUniq, + surf->fjBitmap & BMF_DONTCACHE, width, height, + surf->sizlBitmap.cx, surf->sizlBitmap.cy, surf->hsurf, + surf->iBitmapFormat)); + + //todo: use GetChachImage + + // NOTE: Same BMF_DONTCACHE issue as in QXLGetBitmap + if (!surf->iUniq || (surf->fjBitmap & BMF_DONTCACHE)) { + gdi_unique = 0; + } else { + gdi_unique = surf->iUniq; + } + + if (!ImageKeyGet(pdev, surf->hsurf, gdi_unique, &key)) { + key = GetHash(surf->pvScan0, surf->sizlBitmap.cx, surf->sizlBitmap.cy, 
BITMAP_FMT_RGBA, + surf->sizlBitmap.cx << 2, surf->lDelta, NULL); + ImageKeyPut(pdev, surf->hsurf, gdi_unique, key); + DEBUG_PRINT((pdev, 11, "%s: ImageKeyPut %u\n", __FUNCTION__, key)); + } else { + DEBUG_PRINT((pdev, 11, "%s: ImageKeyGet %u\n", __FUNCTION__, key)); + } + + if (cache_image = ImageCacheGetByKey(pdev, key, TRUE, BITMAP_FMT_RGBA, + surf->sizlBitmap.cx, surf->sizlBitmap.cy)) { + DEBUG_PRINT((pdev, 11, "%s: ImageCacheGetByKey %u hits %u\n", __FUNCTION__, + key, cache_image->hits)); + if (internal = cache_image->image) { + DEBUG_PRINT((pdev, 11, "%s: cached image found %u\n", __FUNCTION__, key)); + *image_phys = PA(pdev, &internal->image); + image_res = (Resource *)((UINT8 *)internal - sizeof(Resource)); + DrawableAddRes(pdev, drawable, image_res); + return TRUE; + } + } else if (ChachSizeTest(pdev, surf)) { + CacheImage *cache_image = AllocCacheImage(pdev); + ImageCacheRemove(pdev, cache_image); + cache_image->key = key; + cache_image->image = NULL; + cache_image->format = BITMAP_FMT_RGBA; + cache_image->width = surf->sizlBitmap.cx; + cache_image->height = surf->sizlBitmap.cy; + ImageCacheAdd(pdev, cache_image); + RingAdd(pdev, &pdev->cache_image_lru, &cache_image->lru_link); + DEBUG_PRINT((pdev, 11, "%s: ImageCacheAdd %u\n", __FUNCTION__, key)); + } + + if (cache_image) { + width = surf->sizlBitmap.cx; + height = surf->sizlBitmap.cy; + src = surf->pvScan0; + } else { + src = (UINT8 *)surf->pvScan0 + area->top * surf->lDelta + (area->left << 2); + area->left = 0; + area->right = width; + area->top = 0; + area->bottom = height; + } + + if (!(image_res = GetQuicImage(pdev, surf, NULL, !!cache_image, width, height, + BITMAP_FMT_RGBA, src, width << 2, key))) { + image_res = GetBitmapImage(pdev, surf, NULL, !!cache_image, width, height, + BITMAP_FMT_RGBA, src, width << 2, key); + } + internal = (InternalImage *)image_res->res; + if ((internal->cache = cache_image)) { + DEBUG_PRINT((pdev, 11, "%s: cache_me %u\n", __FUNCTION__, key)); + cache_image->image = internal; + if (RingItemIsLinked(&cache_image->lru_link)) { + RingRemove(pdev, &cache_image->lru_link); + } + } + *image_phys = PA(pdev, &internal->image); + DrawableAddRes(pdev, drawable, image_res); + RELEASE_RES(pdev, image_res); + return TRUE; +} + +BOOL QXLGetBitsFromCache(PDev *pdev, QXLDrawable *drawable, UINT32 hash_key, PHYSICAL *image_phys) +{ + InternalImage *internal; + CacheImage *cache_image; + Resource *image_res; + + if ((cache_image = ImageCacheGetByKey(pdev, hash_key, FALSE, 0, 0, 0)) && + (internal = cache_image->image)) { + *image_phys = PA(pdev, &internal->image); + image_res = (Resource *)((UINT8 *)internal - sizeof(Resource)); + DrawableAddRes(pdev, drawable, image_res); + return TRUE; + } + return FALSE; +} + +BOOL QXLGetMask(PDev *pdev, QXLDrawable *drawable, QMask *qxl_mask, SURFOBJ *mask, POINTL *pos, + BOOL invers, LONG width, LONG height) +{ + Rect area; + + if (!mask) { + qxl_mask->bitmap = 0; + return TRUE; + } + + ASSERT(pdev, pos && qxl_mask && drawable); + if (mask->iBitmapFormat != BMF_1BPP) { + DEBUG_PRINT((pdev, 0, "%s: bitmap format err\n", __FUNCTION__)); + return FALSE; + } + + qxl_mask->flags = invers ? 
MASK_INVERS : 0; + + area.left = pos->x; + area.right = area.left + width; + area.top = pos->y; + area.bottom = area.top + height; + + if (QXLGetBitmap(pdev, drawable, &qxl_mask->bitmap, mask, &area, NULL, NULL, TRUE)) { + qxl_mask->pos.x = area.left; + qxl_mask->pos.y = area.top; + return TRUE; + } + return FALSE; +} + +static void FreeBuf(PDev *pdev, Resource *res) +{ + ONDBG(pdev->num_buf_pages--); + FreeMem(pdev, res); +} + +UINT8 *QXLGetBuf(PDev *pdev, QXLDrawable *drawable, PHYSICAL *buf_phys, UINT32 size) +{ + Resource *buf_res; + + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + if (size > PAGE_SIZE - sizeof(Resource)) { + DEBUG_PRINT((pdev, 0, "%s: size err\n", __FUNCTION__)); + return NULL; + } + + buf_res = (Resource *)AllocMem(pdev, sizeof(Resource) + size); + ONDBG(pdev->num_buf_pages++); + buf_res->refs = 1; + buf_res->free = FreeBuf; + + *buf_phys = PA(pdev, buf_res->res); + DrawableAddRes(pdev, drawable, buf_res); + RELEASE_RES(pdev, buf_res); + return buf_res->res; +} + +#ifdef UPDATE_CMD +void UpdateArea(PDev *pdev, RECTL *area) +{ + QXLCommand *cmd; + QXLOutput *output; + QXLUpdateCmd *updat_cmd; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + output = (QXLOutput *)AllocMem(pdev, sizeof(QXLOutput) + sizeof(QXLUpdateCmd)); + output->num_res = 0; + updat_cmd = (QXLUpdateCmd *)output->data; + updat_cmd->release_info.id = (UINT64)output; + ONDBG(pdev->num_outputs++); //todo: atomic + + CopyRect(&updat_cmd->area, area); + updat_cmd->update_id = ++pdev->update_id; + + WaitForCmdRing(pdev); + cmd = RING_PROD_ITEM(pdev->cmd_ring); + cmd->type = QXL_CMD_UPDATE; + cmd->data = PA(pdev, updat_cmd); + PUSH_CMD(pdev); + do { +#ifdef DBG + { + LARGE_INTEGER timeout; // 1 => 100 nanoseconds + timeout.QuadPart = -1 * (1000 * 1000 * 10); //negative => relative // 1s +#if (WINVER < 0x0501) + pdev->WaitForEvent(pdev->display_event, &timeout); +#else + EngWaitForSingleObject(pdev->display_event, &timeout); +#endif //(WINVER < 0x0501) + if (*pdev->dev_update_id != pdev->update_id) { + DEBUG_PRINT((pdev, 0, "%s: 0x%lx: timeout\n", __FUNCTION__, pdev)); + } + } +#else +#if (WINVER < 0x0501) + pdev->WaitForEvent(pdev->display_event, NULL); +#else + EngWaitForSingleObject(pdev->display_event, NULL); +#endif //(WINVER < 0x0501) +#endif // DEBUG + mb(); + } while (*pdev->dev_update_id != pdev->update_id); +} + +#else + +void UpdateArea(PDev *pdev, RECTL *area) +{ + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + CopyRect(pdev->update_area, area); + WRITE_PORT_UCHAR(pdev->update_area_port, 0); +} + +#endif + +static _inline void add_rast_glyphs(PDev *pdev, QXLString *str, ULONG count, GLYPHPOS *glyps, + QXLDataChunk **chunk_ptr, UINT8 **now_ptr, + UINT8 **end_ptr, int bpp, POINTL *delta, Point **str_pos) +{ + GLYPHPOS *glyps_end = glyps + count; + QXLDataChunk *chunk = *chunk_ptr; + UINT8 *now = *now_ptr; + UINT8 *end = *end_ptr; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + for (; glyps < glyps_end; glyps++) { + RasterGlyph *glyph; + UINT8 *line; + UINT8 *end_line; + UINT32 stride; + + if (end - now < sizeof(*glyph)) { + NEW_DATA_CHUNK(&pdev->num_glyphs_pages, PAGE_SIZE); + } + + glyph = (RasterGlyph *)now; + if (delta) { + if (*str_pos) { + glyph->render_pos.x = (*str_pos)->x + delta->x; + glyph->render_pos.y = (*str_pos)->y + delta->y; + } else { + glyph->render_pos.x = glyps->ptl.x; + glyph->render_pos.y = glyps->ptl.y; + } + *str_pos = &glyph->render_pos; + } else { + glyph->render_pos.x = glyps->ptl.x; + glyph->render_pos.y = glyps->ptl.y; + } + glyph->glyph_origin.x = 
glyps->pgdf->pgb->ptlOrigin.x; + glyph->glyph_origin.y = glyps->pgdf->pgb->ptlOrigin.y; + glyph->width = (UINT16)glyps->pgdf->pgb->sizlBitmap.cx; + glyph->height = (UINT16)glyps->pgdf->pgb->sizlBitmap.cy; + now += sizeof(*glyph); + chunk->data_size += sizeof(*glyph); + str->data_size += sizeof(*glyph); + if (!glyph->height) { + continue; + } + + stride = ALIGN(glyph->width * bpp, 8) >> 3; + end_line = (UINT8 *)glyps->pgdf->pgb->aj - stride; + line = (UINT8 *)glyps->pgdf->pgb->aj + stride * (glyph->height - 1); + + for (; line != end_line; line -= stride) { + UINT8 *bits_pos = line; + UINT8 *bits_end = bits_pos + stride; + + for (; bits_pos != bits_end; bits_pos++) { + UINT8 val; + int i; + if (end - now < sizeof(*bits_pos)) { + NEW_DATA_CHUNK(&pdev->num_glyphs_pages, PAGE_SIZE); + } + *(UINT8 *)now = *bits_pos; + now += sizeof(*bits_pos); + chunk->data_size += sizeof(*bits_pos); + str->data_size += sizeof(*bits_pos); + } + } + } + *chunk_ptr = chunk; + *now_ptr = now; + *end_ptr = end; + DEBUG_PRINT((pdev, 14, "%s: done\n", __FUNCTION__)); +} + +static _inline void add_vec_glyphs(PDev *pdev, QXLString *str, ULONG count, GLYPHPOS *glyps, + QXLDataChunk **chunk_ptr, UINT8 **now_ptr, UINT8 **end_ptr, + POINTL *delta, Point **str_pos) +{ + GLYPHPOS *glyps_end = glyps + count; + QXLDataChunk *chunk = *chunk_ptr; + UINT8 *now = *now_ptr; + UINT8 *end = *end_ptr; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + for (; glyps < glyps_end; glyps++) { + VectotGlyph *glyph; + + if (end - now < sizeof(*glyph)) { + NEW_DATA_CHUNK(&pdev->num_glyphs_pages, PAGE_SIZE); + } + chunk->data_size += sizeof(*glyph); + str->data_size += sizeof(*glyph); + glyph = (VectotGlyph *)now; + now += sizeof(*glyph); + + if (delta) { + if (*str_pos) { + glyph->render_pos.x = (*str_pos)->x + delta->x; + glyph->render_pos.y = (*str_pos)->y + delta->y; + } else { + glyph->render_pos.x = glyps->ptl.x; + glyph->render_pos.y = glyps->ptl.y; + } + *str_pos = &glyph->render_pos; + } else { + glyph->render_pos.x = glyps->ptl.x; + glyph->render_pos.y = glyps->ptl.y; + } + glyph->data_size = 0; + GetPathCommon(pdev, glyps->pgdf->ppo, &chunk, &now, &end, &glyph->data_size, + &pdev->num_glyphs_pages); + str->data_size += glyph->data_size; + } + *chunk_ptr = chunk; + *now_ptr = now; + *end_ptr = end; + + DEBUG_PRINT((pdev, 14, "%s: done\n", __FUNCTION__)); +} + +static _inline BOOL add_glyphs(PDev *pdev, QXLString *str, ULONG count, GLYPHPOS *glyps, + QXLDataChunk **chunk, UINT8 **now, UINT8 **end, POINTL *delta, + Point **str_pos) +{ + if (str->flags & STRING_RASTER_A1) { + add_rast_glyphs(pdev, str, count, glyps, chunk, now, end, 1, delta, str_pos); + } else if (str->flags & STRING_RASTER_A4) { + add_rast_glyphs(pdev, str, count, glyps, chunk, now, end, 4, delta, str_pos); + } else { + DEBUG_PRINT((pdev, 0, "%s: vector: untested path, doing nothing!!!\n", __FUNCTION__)); + return FALSE; + add_vec_glyphs(pdev, str, count, glyps, chunk, now, end, delta, str_pos); + } + return TRUE; +} + +static void FreeSring(PDev *pdev, Resource *res) +{ + PHYSICAL chunk_phys; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + chunk_phys = ((QXLString *)res->res)->chunk.next_chunk; + while (chunk_phys) { + QXLDataChunk *chunk = (QXLDataChunk *)VA(pdev, chunk_phys); + chunk_phys = chunk->next_chunk; + FreeMem(pdev, chunk); + ONDBG(pdev->num_glyphs_pages--); + } + + FreeMem(pdev, res); + ONDBG(pdev->num_glyphs_pages--); + + DEBUG_PRINT((pdev, 14, "%s: done\n", __FUNCTION__)); +} + + +#define TEXT_ALLOC_SIZE sizeof(Resource) + sizeof(QXLString) + 
512 + +BOOL QXLGetStr(PDev *pdev, QXLDrawable *drawable, PHYSICAL *str_phys, FONTOBJ *font, STROBJ *str) +{ + Resource *str_res; + QXLString *qxl_str; + QXLDataChunk *chunk; + UINT8 *now; + UINT8 *end; + BOOL more; + static int id_QXLGetStr = 0; + POINTL delta; + POINTL *delta_ptr; + Point *str_pos; + + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + + str_res = (Resource *)AllocMem(pdev, TEXT_ALLOC_SIZE); + ONDBG(pdev->num_glyphs_pages++); + str_res->refs = 1; + str_res->free = FreeSring; + + qxl_str = (QXLString *)str_res->res; + qxl_str->data_size = 0; + qxl_str->length = (UINT16)str->cGlyphs; + qxl_str->flags = 0; + + if (font->flFontType & FO_TYPE_RASTER) { + qxl_str->flags = (font->flFontType & FO_GRAY16) ? STRING_RASTER_A4 : + STRING_RASTER_A1; + } + + chunk = &qxl_str->chunk; + chunk->data_size = 0; + chunk->prev_chunk = 0; + chunk->next_chunk = 0; + + now = chunk->data; + end = (UINT8 *)str_res + TEXT_ALLOC_SIZE; + + if (str->ulCharInc) { + str_pos = NULL; + if (str->flAccel & SO_VERTICAL) { + delta.x = 0; + delta.y = (str->flAccel & SO_REVERSED) ? -(LONG)str->ulCharInc : str->ulCharInc; + } else { + delta.x = (str->flAccel & SO_REVERSED) ? -(LONG)str->ulCharInc : str->ulCharInc; + delta.y = 0; + } + delta_ptr = δ + } else { + delta_ptr = NULL; + } + + STROBJ_vEnumStart(str); + + do { + ULONG count; + GLYPHPOS *glyps; + + if (str->pgp) { + count = str->cGlyphs; + glyps = str->pgp; + more = FALSE; + } else { + more = STROBJ_bEnum(str, &count, &glyps); + + if (more == DDI_ERROR) { + goto error; + } + } + if (!add_glyphs(pdev, qxl_str, count, glyps, &chunk, &now, &end, delta_ptr, &str_pos)) { + goto error; + } + + } while (more); + + *str_phys = PA(pdev, str_res->res); + DrawableAddRes(pdev, drawable, str_res); + RELEASE_RES(pdev, str_res); + + DEBUG_PRINT((pdev, 10, "%s: done size %u\n", __FUNCTION__, qxl_str->data_size)); + return TRUE; + +error: + FreeSring(pdev, str_res); + DEBUG_PRINT((pdev, 10, "%s: error\n", __FUNCTION__)); + return FALSE; +} + +QXLCursorCmd *CursorCmd(PDev *pdev) +{ + QXLCursorCmd *cursor_cmd; + QXLOutput *output; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + output = (QXLOutput *)AllocMem(pdev, sizeof(QXLOutput) + sizeof(QXLCursorCmd)); + output->num_res = 0; + cursor_cmd = (QXLCursorCmd *)output->data; + cursor_cmd->release_info.id = (UINT64)output; + ONDBG(pdev->num_outputs++); //todo: atomic + DEBUG_PRINT((pdev, 8, "%s: done\n", __FUNCTION__)); + return cursor_cmd; +} + +void PushCursorCmd(PDev *pdev, QXLCursorCmd *cursor_cmd) +{ + QXLCommand *cmd; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + WaitForCursorRing(pdev); + cmd = RING_PROD_ITEM(pdev->cursor_ring); + cmd->type = QXL_CMD_CURSOR; + cmd->data = PA(pdev, cursor_cmd); + PUSH_CURSOR_CMD(pdev); + DEBUG_PRINT((pdev, 8, "%s: done\n", __FUNCTION__)); +} + +typedef struct InternalCursor { + struct InternalCursor *next; + RingItem lru_link; + HSURF hsurf; + ULONG unique; + QXLCursor cursor; +} InternalCursor; + + +#define CURSOR_HASH_VAL(hsurf) (HSURF_HASH_VAL(hsurf) & CURSOR_HASH_NASKE) + +static void CursorCacheRemove(PDev *pdev, InternalCursor *cursor) +{ + InternalCursor **internal; + + DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__)); + + ASSERT(pdev, cursor->unique); + internal = &pdev->cursor_cache[CURSOR_HASH_VAL(cursor->hsurf)]; + + while (*internal) { + if ((*internal)->hsurf == cursor->hsurf) { + if ((*internal) == cursor) { + *internal = cursor->next; + RingRemove(pdev, &cursor->lru_link); + RELEASE_RES(pdev, (Resource *)((UINT8 *)cursor - sizeof(Resource))); + pdev->num_cursors--; 
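+                // The RELEASE_RES above drops the reference taken by CursorCacheAdd (GET_RES),
+                // so the cursor resource can be reclaimed once any outstanding command that
+                // still references it is released as well.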
+                return;
+            }
+            DEBUG_PRINT((pdev, 0, "%s: unexpected\n", __FUNCTION__));
+        }
+        internal = &(*internal)->next;
+    }
+    ASSERT(pdev, FALSE);
+}
+
+static void CursorCacheAdd(PDev *pdev, InternalCursor *cursor)
+{
+    int key;
+
+    DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__));
+
+    if (!cursor->unique) {
+        return;
+    }
+
+    if (pdev->num_cursors == CURSOR_CACHE_SIZE) {
+        ASSERT(pdev, RingGetTail(pdev, &pdev->cursors_lru));
+        CursorCacheRemove(pdev, CONTAINEROF(RingGetTail(pdev, &pdev->cursors_lru),
+                                            InternalCursor, lru_link));
+    }
+
+    key = CURSOR_HASH_VAL(cursor->hsurf);
+    cursor->next = pdev->cursor_cache[key];
+    pdev->cursor_cache[key] = cursor;
+
+    RingAdd(pdev, &pdev->cursors_lru, &cursor->lru_link);
+    GET_RES((Resource *)((UINT8 *)cursor - sizeof(Resource)));
+    pdev->num_cursors++;
+}
+
+static InternalCursor *CursorCacheGet(PDev *pdev, HSURF hsurf, UINT32 unique)
+{
+    InternalCursor **internal;
+
+    DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__));
+    if (!unique) {
+        return NULL;
+    }
+
+    internal = &pdev->cursor_cache[CURSOR_HASH_VAL(hsurf)];
+    while (*internal) {
+        InternalCursor *now = *internal;
+        if (now->hsurf == hsurf) {
+            if (now->unique == unique) {
+                RingRemove(pdev, &now->lru_link);
+                RingAdd(pdev, &pdev->cursors_lru, &now->lru_link);
+                return now;
+            }
+            CursorCacheRemove(pdev, now);
+            break;
+        }
+        internal = &now->next;
+    }
+    return NULL;
+}
+
+static void FreeCursor(PDev *pdev, Resource *res)
+{
+    PHYSICAL chunk_phys;
+
+    DEBUG_PRINT((pdev, 12, "%s\n", __FUNCTION__));
+    chunk_phys = ((InternalCursor *)res->res)->cursor.chunk.next_chunk;
+    while (chunk_phys) {
+        QXLDataChunk *chunk = (QXLDataChunk *)VA(pdev, chunk_phys);
+        chunk_phys = chunk->next_chunk;
+        FreeMem(pdev, chunk);
+        ONDBG(pdev->num_cursor_pages--);
+    }
+
+    FreeMem(pdev, res);
+    ONDBG(pdev->num_cursor_pages--);
+
+    DEBUG_PRINT((pdev, 13, "%s: done\n", __FUNCTION__));
+}
+
+
+typedef struct NewCursorInfo {
+    QXLCursor *cursor;
+    QXLDataChunk *chunk;
+    UINT8 *now;
+    UINT8 *end;
+} NewCursorInfo;
+
+#define CURSOR_ALLOC_SIZE (PAGE_SIZE << 1)
+
+static BOOL GetCursorCommon(PDev *pdev, QXLCursorCmd *cmd, LONG hot_x, LONG hot_y, SURFOBJ *surf,
+                            UINT16 type, NewCursorInfo *info)
+{
+    InternalCursor *internal;
+    QXLCursor *cursor;
+    Resource *res;
+    ULONG unique;
+    UINT8 *src;
+    UINT8 *src_end;
+    int line_size;
+
+
+    DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__));
+
+    unique = (surf->fjBitmap & BMF_DONTCACHE) ? 0 : surf->iUniq;
+
+    if ((internal = CursorCacheGet(pdev, surf->hsurf, unique))) {
+        res = (Resource *)((UINT8 *)internal - sizeof(Resource));
+        CursorCmdAddRes(pdev, cmd, res);
+        cmd->u.set.shape = PA(pdev, &internal->cursor);
+        return TRUE;
+    }
+
+    ASSERT(pdev, sizeof(Resource) + sizeof(InternalCursor) < CURSOR_ALLOC_SIZE);
+    res = (Resource *)AllocMem(pdev, CURSOR_ALLOC_SIZE);
+    ONDBG(pdev->num_cursor_pages++);
+    res->refs = 1;
+    res->free = FreeCursor;
+
+    internal = (InternalCursor *)res->res;
+    internal->hsurf = surf->hsurf;
+    internal->unique = unique;
+    RingItemInit(&internal->lru_link);
+
+    cursor = info->cursor = &internal->cursor;
+    cursor->header.type = type;
+    cursor->header.unique = unique ? ++pdev->last_cursor_id : 0;
+    cursor->header.width = (UINT16)surf->sizlBitmap.cx;
+    cursor->header.height = (type == CURSOR_TYPE_MONO) ?
(UINT16)surf->sizlBitmap.cy >> 1 : + (UINT16)surf->sizlBitmap.cy; + cursor->header.hot_spot_x = (UINT16)hot_x; + cursor->header.hot_spot_y = (UINT16)hot_y; + + cursor->data_size = 0; + + info->chunk = &cursor->chunk; + info->chunk->data_size = 0; + info->chunk->prev_chunk = 0; + info->chunk->next_chunk = 0; + + info->now = info->chunk->data; + info->end = (UINT8 *)res + CURSOR_ALLOC_SIZE; + + switch (type) { + case CURSOR_TYPE_ALPHA: + case CURSOR_TYPE_COLOR32: + line_size = cursor->header.width << 2; + break; + case CURSOR_TYPE_MONO: + line_size = ALIGN(cursor->header.width, 8) >> 3; + break; + case CURSOR_TYPE_COLOR4: + line_size = ALIGN(cursor->header.width, 2) >> 1; + break; + case CURSOR_TYPE_COLOR8: + line_size = cursor->header.width; + break; + case CURSOR_TYPE_COLOR16: + line_size = cursor->header.width << 1; + break; + case CURSOR_TYPE_COLOR24: + line_size = cursor->header.width * 3; + break; + } + + cursor->data_size = line_size * surf->sizlBitmap.cy; + src = surf->pvScan0; + src_end = src + (surf->lDelta * surf->sizlBitmap.cy); + for (; src != src_end; src += surf->lDelta) { + PutBytes(pdev, &info->chunk, &info->now, &info->end, src, line_size, + &pdev->num_cursor_pages, PAGE_SIZE); + } + + CursorCacheAdd(pdev, internal); + CursorCmdAddRes(pdev, cmd, res); + RELEASE_RES(pdev, res); + cmd->u.set.shape = PA(pdev, &internal->cursor); + DEBUG_PRINT((pdev, 11, "%s: done, data_size %u\n", __FUNCTION__, cursor->data_size)); + return FALSE; +} + +BOOL GetAlphaCursor(PDev *pdev, QXLCursorCmd *cmd, LONG hot_x, LONG hot_y, SURFOBJ *surf) +{ + NewCursorInfo info; + + ASSERT(pdev, surf->iBitmapFormat == BMF_32BPP); + ASSERT(pdev, surf->sizlBitmap.cx > 0 && surf->sizlBitmap.cy > 0); + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + GetCursorCommon(pdev, cmd, hot_x, hot_y, surf, CURSOR_TYPE_ALPHA, &info); + DEBUG_PRINT((pdev, 8, "%s: done\n", __FUNCTION__)); + return TRUE; +} + +BOOL GetMonoCursor(PDev *pdev, QXLCursorCmd *cmd, LONG hot_x, LONG hot_y, SURFOBJ *surf) +{ + NewCursorInfo info; + + ASSERT(pdev, surf->iBitmapFormat == BMF_1BPP); + ASSERT(pdev, surf->sizlBitmap.cy > 0 && (surf->sizlBitmap.cy & 1) == 0); + ASSERT(pdev, surf->sizlBitmap.cx > 0); + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + GetCursorCommon(pdev, cmd, hot_x, hot_y, surf, CURSOR_TYPE_MONO, &info); + DEBUG_PRINT((pdev, 8, "%s: done\n", __FUNCTION__)); + return TRUE; +} + +BOOL GetColorCursor(PDev *pdev, QXLCursorCmd *cmd, LONG hot_x, LONG hot_y, SURFOBJ *surf, + SURFOBJ *mask, XLATEOBJ *color_trans) +{ + NewCursorInfo info; + UINT16 type; + + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + ASSERT(pdev, surf && mask); + ASSERT(pdev, surf->sizlBitmap.cx > 0 && surf->sizlBitmap.cy); + + if ( mask->sizlBitmap.cx != surf->sizlBitmap.cx || + mask->sizlBitmap.cy != surf->sizlBitmap.cy * 2 ) { + DEBUG_PRINT((pdev, 0, "%s: err mask size, surf(%d, %d) mask(%d, %d)\n", + __FUNCTION__, + surf->sizlBitmap.cx, + surf->sizlBitmap.cy, + mask->sizlBitmap.cx, + mask->sizlBitmap.cy)); + return FALSE; + } + + switch (surf->iBitmapFormat) { + case BMF_32BPP: + type = CURSOR_TYPE_COLOR32; + break; + case BMF_24BPP: + type = CURSOR_TYPE_COLOR24; + break; + case BMF_16BPP: + type = CURSOR_TYPE_COLOR16; + break; + case BMF_8BPP: + type = CURSOR_TYPE_COLOR8; + break; + case BMF_4BPP: + type = CURSOR_TYPE_COLOR4; + break; + default: + DEBUG_PRINT((pdev, 0, "%s: unexpected format\n", __FUNCTION__)); + return FALSE; + } + if (!GetCursorCommon(pdev, cmd, hot_x, hot_y, surf, type, &info)) { + int line_size; + UINT8 *src; + UINT8 
*src_end; + + if (type == CURSOR_TYPE_COLOR8) { + + DEBUG_PRINT((pdev, 8, "%s: CURSOR_TYPE_COLOR8\n", __FUNCTION__)); + ASSERT(pdev, color_trans); + ASSERT(pdev, color_trans->pulXlate); + ASSERT(pdev, color_trans->flXlate & XO_TABLE); + ASSERT(pdev, color_trans->cEntries == 256); + + if (pdev->bitmap_format == BMF_32BPP) { + PutBytes(pdev, &info.chunk, &info.now, &info.end, (UINT8 *)color_trans->pulXlate, + 256 << 2, &pdev->num_cursor_pages, PAGE_SIZE); + } else { + int i; + + for (i = 0; i < 256; i++) { + UINT32 ent = _16bppTo32bpp(color_trans->pulXlate[i]); + PutBytes(pdev, &info.chunk, &info.now, &info.end, (UINT8 *)&ent, + 4, &pdev->num_cursor_pages, PAGE_SIZE); + } + } + info.cursor->data_size += 256 << 2; + } else if (type == CURSOR_TYPE_COLOR4) { + + ASSERT(pdev, color_trans); + ASSERT(pdev, color_trans->pulXlate); + ASSERT(pdev, color_trans->flXlate & XO_TABLE); + ASSERT(pdev, color_trans->cEntries == 16); + + if (pdev->bitmap_format == BMF_32BPP) { + PutBytes(pdev, &info.chunk, &info.now, &info.end, (UINT8 *)color_trans->pulXlate, + 16 << 2, &pdev->num_cursor_pages, PAGE_SIZE); + } else { + int i; + + for (i = 0; i < 16; i++) { + UINT32 ent = _16bppTo32bpp(color_trans->pulXlate[i]); + PutBytes(pdev, &info.chunk, &info.now, &info.end, (UINT8 *)&ent, + 4, &pdev->num_cursor_pages, PAGE_SIZE); + } + } + info.cursor->data_size += 16 << 2; + } + line_size = ALIGN(mask->sizlBitmap.cx, 8) >> 3; + info.cursor->data_size += line_size * surf->sizlBitmap.cy; + src = mask->pvScan0; + src_end = src + (mask->lDelta * surf->sizlBitmap.cy); + + for (; src != src_end; src += mask->lDelta) { + PutBytes(pdev, &info.chunk, &info.now, &info.end, src, line_size, + &pdev->num_cursor_pages, PAGE_SIZE); + } + } + + DEBUG_PRINT((pdev, 8, "%s: done\n", __FUNCTION__)); + return TRUE; +} + +BOOL GetTransparentCursor(PDev *pdev, QXLCursorCmd *cmd) +{ + Resource *res; + InternalCursor *internal; + QXLCursor *cursor; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, sizeof(Resource) + sizeof(InternalCursor) < PAGE_SIZE); + + res = (Resource *)AllocMem(pdev, sizeof(Resource) + sizeof(InternalCursor)); + ONDBG(pdev->num_cursor_pages++); + res->refs = 1; + res->free = FreeCursor; + + internal = (InternalCursor *)res->res; + internal->hsurf = NULL; + internal->unique = 0; + RingItemInit(&internal->lru_link); + + cursor = &internal->cursor; + cursor->header.type = CURSOR_TYPE_MONO; + cursor->header.unique = 0; + cursor->header.width = 0; + cursor->header.height = 0; + cursor->header.hot_spot_x = 0; + cursor->header.hot_spot_y = 0; + cursor->data_size = 0; + cursor->chunk.data_size = 0; + + CursorCmdAddRes(pdev, cmd, res); + RELEASE_RES(pdev, res); + cmd->u.set.shape = PA(pdev, &internal->cursor); + + DEBUG_PRINT((pdev, 8, "%s: done\n", __FUNCTION__)); + return TRUE; +} + +static void quic_usr_error(QuicUsrContext *usr, const char *format, ...) +{ + QuicData *quic_data = (QuicData *)usr; + va_list ap; + + va_start(ap, format); + DebugPrintV(quic_data->pdev, format, ap); + va_end(ap); + EngDebugBreak(); +} + +static void quic_usr_warn(QuicUsrContext *usr, const char *format, ...) 
+{ + QuicData *quic_data = (QuicData *)usr; + va_list ap; + + va_start(ap, format); + DebugPrintV(quic_data->pdev, format, ap); + va_end(ap); +} + +static void *quic_usr_malloc(QuicUsrContext *usr, int size) +{ + return EngAllocMem(0, size, ALLOC_TAG); +} + +static void quic_usr_free(QuicUsrContext *usr, void *ptr) +{ + EngFreeMem(ptr); +} + +BOOL ResInit(PDev *pdev) +{ + QuicData *usr_data; + + if (!(usr_data = EngAllocMem(FL_ZERO_MEMORY, sizeof(QuicData), ALLOC_TAG))) { + return FALSE; + } + usr_data->user.error = quic_usr_error; + usr_data->user.warn = quic_usr_warn; + usr_data->user.info = quic_usr_warn; + usr_data->user.malloc = quic_usr_malloc; + usr_data->user.free = quic_usr_free; + usr_data->user.more_space = quic_usr_more_space; + usr_data->user.more_lines = quic_usr_more_lines; + usr_data->pdev = pdev; + if (!(usr_data->quic = quic_create(&usr_data->user))) { + EngFreeMem(usr_data); + return FALSE; + } + pdev->quic_data = usr_data; + return TRUE; +} + +void ResDestroy(PDev *pdev) +{ + QuicData *usr_data = pdev->quic_data; + quic_destroy(usr_data->quic); + EngFreeMem(usr_data); +} + +void ResInitGlobals() +{ + image_id_sem = EngCreateSemaphore(); + if (!image_id_sem) { + EngDebugBreak(); + } + quic_init(); +} + +void ResDestroyGlobals() +{ + EngDeleteSemaphore(image_id_sem); + image_id_sem = NULL; +} + diff --git a/display/res.h b/display/res.h new file mode 100644 index 0000000..9e2b261 --- /dev/null +++ b/display/res.h @@ -0,0 +1,56 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+*/ + +#ifndef _H_RES +#define _H_RES + +#include "qxldd.h" + +UINT64 ReleaseOutput(PDev *pdev, UINT64 output_id); + +QXLDrawable *Drawable(PDev *pdev, UINT8 type, RECTL *area, CLIPOBJ *clip); +void PushDrawable(PDev *pdev, QXLDrawable *drawable); + +BOOL QXLGetPath(PDev *pdev, QXLDrawable *drawable, PHYSICAL *path_phys, PATHOBJ *path); +BOOL QXLGetMask(PDev *pdev, QXLDrawable *drawable, QMask *qxl_mask, SURFOBJ *mask, POINTL *pos, + BOOL invers, LONG width, LONG height); +BOOL QXLGetBrush(PDev *pdev, QXLDrawable *drawable, Brush *qxl_brush, + BRUSHOBJ *brush, POINTL *brush_pos); +BOOL QXLGetBitmap(PDev *pdev, QXLDrawable *drawable, PHYSICAL *image_phys, SURFOBJ *surf, + Rect *area, XLATEOBJ *color_trans, UINT32 *hash_key, BOOL use_cache); +BOOL QXLGetBitsFromCache(PDev *pdev, QXLDrawable *drawable, UINT32 hash_key, PHYSICAL *image_phys); +BOOL QXLGetAlphaBitmap(PDev *pdev, QXLDrawable *drawable, PHYSICAL *image_phys, SURFOBJ *surf, + Rect *area); +UINT8 *QXLGetBuf(PDev *pdev, QXLDrawable *drawable, PHYSICAL *buf_phys, UINT32 size); +BOOL QXLGetStr(PDev *pdev, QXLDrawable *drawable, PHYSICAL *str_phys, FONTOBJ *font, STROBJ *str); + +void UpdateArea(PDev *pdev, RECTL *area); + +QXLCursorCmd *CursorCmd(PDev *pdev); +void PushCursorCmd(PDev *pdev, QXLCursorCmd *cursor_cmd); + +BOOL GetAlphaCursor(PDev *pdev, QXLCursorCmd *cmd, LONG hot_x, LONG hot_y, SURFOBJ *surf); +BOOL GetColorCursor(PDev *pdev, QXLCursorCmd *cmd, LONG hot_x, LONG hot_y, SURFOBJ *surf, + SURFOBJ *mask, XLATEOBJ *color_trans); +BOOL GetMonoCursor(PDev *pdev, QXLCursorCmd *cmd, LONG hot_x, LONG hot_y, SURFOBJ *surf); +BOOL GetTransparentCursor(PDev *pdev, QXLCursorCmd *cmd); + +BOOL ResInit(PDev *pdev); +void ResDestroy(PDev *pdev); +void ResInitGlobals(); +void ResDestroyGlobals(); +#endif diff --git a/display/rop.c b/display/rop.c new file mode 100644 index 0000000..56ba3df --- /dev/null +++ b/display/rop.c @@ -0,0 +1,1526 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include "os_dep.h" +#include "qxldd.h" +#include "utils.h" +#include "res.h" +#include "rop.h" + + +enum ROP3type { + ROP3_TYPE_ERR, + ROP3_TYPE_FILL, + ROP3_TYPE_OPAQUE, + ROP3_TYPE_COPY, + ROP3_TYPE_BLEND, + ROP3_TYPE_BLACKNESS, + ROP3_TYPE_WHITENESS, + ROP3_TYPE_INVERS, + ROP3_TYPE_ROP3, + ROP3_TYPE_NOP, +}; + + +ROP3Info rops2[] = { + {QXL_EFFECT_OPAQUE, 0, ROP3_TYPE_BLACKNESS, ROPD_OP_BLACKNESS}, //0 + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_OR | ROPD_INVERS_RES}, //DPon + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, + ROPD_INVERS_BRUSH | ROPD_OP_AND}, //DPna + {QXL_EFFECT_OPAQUE, ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_INVERS_BRUSH | ROPD_OP_PUT}, //Pn + {QXL_EFFECT_BLACKNESS_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, + ROPD_INVERS_DEST | ROPD_OP_AND}, //PDna + {QXL_EFFECT_REVERT_ON_DUP, ROP3_DEST, ROP3_TYPE_INVERS, ROPD_OP_INVERS}, //Dn + {QXL_EFFECT_REVERT_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_XOR}, //DPx + {QXL_EFFECT_BLACKNESS_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, + ROPD_OP_AND | ROPD_INVERS_RES}, //DPan + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_AND}, //DPa + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_XOR | ROPD_INVERS_RES},//DPxn + {QXL_EFFECT_NOP, ROP3_DEST, ROP3_TYPE_NOP, 0}, //D + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, + ROPD_INVERS_BRUSH | ROPD_OP_OR}, //DPno + {QXL_EFFECT_OPAQUE, ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_PUT}, //P + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_INVERS_DEST | ROPD_OP_OR},//PDno + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_OR}, //DPo + {QXL_EFFECT_OPAQUE, 0, ROP3_TYPE_WHITENESS, ROPD_OP_WHITENESS}, //1 +}; + + +ROP3Info rops3[] = { + + //todo: update rop3 effect + + {QXL_EFFECT_OPAQUE, 0, ROP3_TYPE_BLACKNESS, 0}, //0 + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x01}, //DPSoon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x02}, //DPSona + //PSon + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_OP_OR | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x04}, //SDPona + //DPon + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_OR | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x06}, //PDSxnon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x07}, //PDSaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x08}, //SDPnaa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x09}, //PDSxon + //DPna + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, + ROPD_INVERS_BRUSH | ROPD_OP_AND}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x0b}, //PSDnaon + //SPna + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_INVERS_BRUSH | ROPD_OP_AND }, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x0d}, //PDSnaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x0e}, //PDSonon + //Pn + {QXL_EFFECT_OPAQUE, ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_INVERS_BRUSH | ROPD_OP_PUT}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x10}, //PDSona + //DSon + {QXL_EFFECT_BLEND, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_OP_OR | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x12}, //SDPxnon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x13}, //SDPaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x14}, //DPSxnon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x15}, //DPSaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x16}, //PSDPSanaxx + {QXL_EFFECT_BLEND, 
ROP3_ALL, ROP3_TYPE_ROP3, 0x17}, //SSPxDSxaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x18}, //SPxPDxa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x19}, //SDPSanaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x1a}, //PDSPaox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x1b}, //SDPSxaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x1c}, //PSDPaox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x1d}, //DSPDxaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x1e}, //PDSox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x1f}, //PDSoan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x20}, //DPSnaa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x21}, //SDPxon + //DSna + {QXL_EFFECT_NOP_ON_DUP, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_INVERS_SRC | ROPD_OP_AND}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x23}, //SPDnaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x24}, //SPxDSxa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x25}, //PDSPanaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x26}, //SDPSaox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x27}, //SDPSxnox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x28}, //DPSxa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x29}, //PSDPSaoxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x2a}, //DPSana + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x2b}, //SSPxPDxaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x2c}, //SPDSoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x2d}, //PSDnox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x2e}, //PSDPxox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x2f}, //PSDnoan + //PSna + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_INVERS_SRC | ROPD_OP_AND}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x31}, //SDPnaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x32}, //SDPSoox + {QXL_EFFECT_OPAQUE, ROP3_SRC, ROP3_TYPE_COPY, ROPD_INVERS_SRC | ROPD_OP_PUT}, //Sn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x34}, //SPDSaox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x35}, //SPDSxnox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x36}, //SDPox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x37}, //SDPoan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x38}, //PSDPoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x39}, //SPDnox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x3a}, //SPDSxox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x3b}, //SPDnoan + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_OP_XOR}, //PSx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x3d}, //SPDSonox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x3e}, //SPDSnaox + //PSan + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_OP_AND | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x40}, //PSDnaa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x41}, //DPSxon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x42}, //SDxPDxa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x43}, //SPDSanaxn + //SDna + {QXL_EFFECT_BLEND, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_INVERS_DEST | ROPD_OP_AND}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x45}, //DPSnaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x46}, //DSPDaox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x47}, //PSDPxaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x48}, //SDPxa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x49}, //PDSPDaoxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x4a}, //DPSDoax + {QXL_EFFECT_BLEND, ROP3_ALL, 
ROP3_TYPE_ROP3, 0x4b}, //PDSnox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x4c}, //SDPana + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x4d}, //SSPxDSxoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x4e}, //PDSPxox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x4f}, //PDSnoan + //PDna + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_INVERS_DEST | ROPD_OP_AND}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x51}, //DSPnaon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x52}, //DPSDaox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x53}, //SPDSxaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x54}, //DPSonon + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST, ROP3_TYPE_INVERS, 0}, //Dn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x56}, //DPSox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x57}, //DPSoan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x58}, //PDSPoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x59}, //DPSnox + {QXL_EFFECT_REVERT_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_XOR},//DPx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x5b}, //DPSDonox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x5c}, //DPSDxox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x5d}, //DPSnoan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x5e}, //DPSDnaox + //DPan + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_AND | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x60}, //PDSxa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x61}, //DSPDSaoxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x62}, //DSPDoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x63}, //SDPnox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x64}, //SDPSoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x65}, //DSPnox + {QXL_EFFECT_REVERT_ON_DUP, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_OP_XOR}, //DSx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x67}, //SDPSonox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x68}, //DSPDSonoxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x69}, //PDSxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x6a}, //DPSax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x6b}, //PSDPSoaxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x6c}, //SDPax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x6d}, //PDSPDoaxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x6e}, //SDPSnoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x6f}, //PDSxnan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x70}, //PDSana + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x71}, //SSDxPDxaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x72}, //SDPSxox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x73}, //SDPnoan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x74}, //DSPDxox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x75}, //DSPnoan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x76}, //SDPSnaox + //DSan + {QXL_EFFECT_BLEND, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_OP_AND | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x78}, //PDSax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x79}, //DSPDSoaxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x7a}, //DPSDnoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x7b}, //SDPxnan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x7c}, //SPDSnoax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x7d}, //DPSxnan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x7e}, //SPxDSxo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x7f}, //DPSaan + {QXL_EFFECT_BLEND, 
ROP3_ALL, ROP3_TYPE_ROP3, 0x80}, //DPSaa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x81}, //SPxDSxon + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x82}, //DPSxna + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x83}, //SPDSnoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x84}, //SDPxna + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x85}, //PDSPnoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x86}, //DSPDSoaxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x87}, //PDSaxn + {QXL_EFFECT_NOP_ON_DUP, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_OP_AND}, //DSa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x89}, //SDPSnaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x8a}, //DSPnoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x8b}, //DSPDxoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x8c}, //SDPnoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x8d}, //SDPSxoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x8e}, //SSDxPDxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x8f}, //PDSanan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x90}, //PDSxna + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x91}, //SDPSnoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x92}, //DPSDPoaxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x93}, //SPDaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x94}, //PSDPSoaxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x95}, //DPSaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x96}, //DPSxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x97}, //PSDPSonoxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x98}, //SDPSonoxn + //DSxn + {QXL_EFFECT_BLEND, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_OP_XOR | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x9a}, //DPSnax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x9b}, //SDPSoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x9c}, //SPDnax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x9d}, //DSPDoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x9e}, //DSPDSaoxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0x9f}, //PDSxan + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_AND}, //DPa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa1}, //PDSPnaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa2}, //DPSnoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa3}, //DPSDxoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa4}, //PDSPonoxn + //PDxn + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_XOR | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa6}, //DSPnax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa7}, //PDSPoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa8}, //DPSoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xa9}, //DPSoxn + {QXL_EFFECT_NOP, ROP3_DEST, ROP3_TYPE_NOP, 0}, //D + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xab}, //DPSono + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xac}, //SPDSxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xad}, //DPSDaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xae}, //DSPnao + //DPno + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_INVERS_BRUSH | ROPD_OP_OR}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb0}, //PDSnoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb1}, //PDSPxoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb2}, //SSPxDSxox + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb3}, //SDPanan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb4}, //PSDnax + 
{QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb5}, //DPSDoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb6}, //DPSDPaoxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb7}, //SDPxan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb8}, //PSDPxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xb9}, //DSPDaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xba}, //DPSnao + //DSno + {QXL_EFFECT_NOP_ON_DUP, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_INVERS_SRC | ROPD_OP_OR}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xbc}, //SPDSanax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xbd}, //SDxPDxan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xbe}, //DPSxo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xbf}, //DPSano + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_OP_AND}, //PSa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc1}, //SPDSnaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc2}, //SPDSonoxn + //PSxn + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_OP_XOR | ROPD_INVERS_RES}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc4}, //SPDnoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc5}, //SPDSxoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc6}, //SDPnax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc7}, //PSDPoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc8}, //SDPoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xc9}, //SPDoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xca}, //DPSDxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xcb}, //SPDSaoxn + {QXL_EFFECT_OPAQUE, ROP3_SRC, ROP3_TYPE_COPY, ROPD_OP_PUT}, //S + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xcd}, //SDPono + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xce}, //SDPnao + //SPno + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_INVERS_BRUSH | ROPD_OP_OR}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd0}, //PSDnoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd1}, //PSDPxoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd2}, //PDSnax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd3}, //SPDSoaxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd4}, //SSPxPDxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd5}, //DPSanan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd6}, //PSDPSaoxx + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd7}, //DPSxan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd8}, //PDSPxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xd9}, //SDPSaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xda}, //DPSDanax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xdb}, //SPxDSxan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xdc}, //SPDnao + //SDno + {QXL_EFFECT_BLEND, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_INVERS_DEST | ROPD_OP_OR}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xde}, //SDPxo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xdf}, //SDPano + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe0}, //PDSoa + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe1}, //PDSoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe2}, //DSPDxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe3}, //PSDPaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe4}, //SDPSxax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe5}, //PDSPaoxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe6}, //SDPSanax + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe7}, //SPxPDxan + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xe8}, //SSPxDSxax + {QXL_EFFECT_BLEND, ROP3_ALL, 
ROP3_TYPE_ROP3, 0xe9}, //DSPDSanaxxn + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xea}, //DPSao + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xeb}, //DPSxno + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xec}, //SDPao + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xed}, //SDPxno + {QXL_EFFECT_NOP_ON_DUP, ROP3_SRC | ROP3_DEST, ROP3_TYPE_BLEND, ROPD_OP_OR}, //DSo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xef}, //SDPnoo + {QXL_EFFECT_OPAQUE, ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_PUT}, //P + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xf1}, //PDSono + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xf2}, //PDSnao + //PSno + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_INVERS_SRC | ROPD_OP_OR}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xf4}, //PSDnao + //PDno + {QXL_EFFECT_BLEND, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_INVERS_DEST | ROPD_OP_OR}, + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xf6}, //PDSxo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xf7}, //PDSano + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xf8}, //PDSao + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xf9}, //PDSxno + {QXL_EFFECT_NOP_ON_DUP, ROP3_DEST | ROP3_BRUSH, ROP3_TYPE_FILL, ROPD_OP_OR}, //DPo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xfb}, //DPSnoo + {QXL_EFFECT_OPAQUE, ROP3_SRC | ROP3_BRUSH, ROP3_TYPE_OPAQUE, ROPD_OP_OR}, //PSo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xfd}, //PSDnoo + {QXL_EFFECT_BLEND, ROP3_ALL, ROP3_TYPE_ROP3, 0xfe}, //DPSoo + {QXL_EFFECT_OPAQUE, 0, ROP3_TYPE_WHITENESS, 1}, //1 +}; + + +static BOOL DoFill(PDev *pdev, RECTL *area, CLIPOBJ *clip, BRUSHOBJ *brush, POINTL *brush_pos, + ROP3Info *rop_info, SURFOBJ *mask, POINTL *mask_pos, BOOL invers_mask) +{ + QXLDrawable *drawable; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev && area && brush); + + if (!(drawable = Drawable(pdev, QXL_DRAW_FILL, area, clip))) { + return FALSE; + } + + if (!QXLGetBrush(pdev, drawable, &drawable->u.fill.brush, brush, brush_pos) || + !QXLGetMask(pdev, drawable, &drawable->u.fill.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->u.fill.rop_decriptor = rop_info->method_data; + + drawable->effect = mask ? QXL_EFFECT_BLEND : rop_info->effect; + + PushDrawable(pdev, drawable); + return TRUE; +} + +static BOOL GetBitmap(PDev *pdev, QXLDrawable *drawable, PHYSICAL *bitmap_phys, SURFOBJ *surf, + Rect *area, XLATEOBJ *color_trans, BOOL use_cache) +{ + DEBUG_PRINT((pdev, 9, "%s\n", __FUNCTION__)); + if (surf->iType != STYPE_BITMAP) { + ASSERT(pdev, (PDev *)surf->dhpdev == pdev); + DEBUG_PRINT((pdev, 9, "%s copy from self\n", __FUNCTION__)); + *bitmap_phys = 0; + drawable->bitmap_offset = (UINT16)((UINT8 *)bitmap_phys - (UINT8 *)drawable); + drawable->bitmap_area = *area; + area->right = area->right - area->left; + area->left = 0; + area->bottom = area->bottom - area->top; + area->top = 0; + return TRUE; + } + return QXLGetBitmap(pdev, drawable, &drawable->u.opaque.src_bitmap, surf, + area, color_trans, NULL, use_cache); +} + +static _inline UINT8 GdiScaleModeToQxl(ULONG scale_mode) +{ + return (scale_mode == HALFTONE) ? 
IMAGE_SCALE_INTERPOLATE : IMAGE_SCALE_NEAREST; +} + +static BOOL DoOpaque(PDev *pdev, RECTL *area, CLIPOBJ *clip, SURFOBJ *src, RECTL *src_rect, + XLATEOBJ *color_trans, BRUSHOBJ *brush, POINTL *brush_pos, + UINT16 rop_decriptor, SURFOBJ *mask, POINTL *mask_pos, BOOL invers_mask, + ULONG scale_mode) +{ + QXLDrawable *drawable; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev && area && brush && src_rect && src); + + if (!(drawable = Drawable(pdev, QXL_DRAW_OPAQUE, area, clip))) { + return FALSE; + } + + drawable->u.opaque.scale_mode = GdiScaleModeToQxl(scale_mode); + CopyRect(&drawable->u.opaque.src_area, src_rect); + if (!QXLGetBrush(pdev, drawable, &drawable->u.opaque.brush, brush, brush_pos) || + !QXLGetMask(pdev, drawable, &drawable->u.opaque.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top) || + !GetBitmap(pdev, drawable, &drawable->u.opaque.src_bitmap, src, + &drawable->u.opaque.src_area, color_trans, TRUE)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->u.opaque.rop_decriptor = rop_decriptor; + drawable->effect = mask ? QXL_EFFECT_BLEND : QXL_EFFECT_OPAQUE; + PushDrawable(pdev, drawable); + return TRUE; +} + +static BOOL StreamTest(PDev *pdev, SURFOBJ *src_surf, XLATEOBJ *color_trans, RECTL *src_rect, + RECTL *dest) +{ + Ring *ring = &pdev->update_trace; + UpdateTrace *trace = (UpdateTrace *)ring->next; + LONG src_pixmap_pixels = src_surf->sizlBitmap.cx * src_surf->sizlBitmap.cy; + + if (src_pixmap_pixels <= 128 * 128) { + return TRUE; + } + + for (;;) { + if (SameRect(dest, &trace->area) || (trace->hsurf == src_surf->hsurf && + src_pixmap_pixels / RectSize(src_rect) > 100)) { + UINT32 now = *pdev->mm_clock; + BOOL ret; + + if (now != trace->last_time && now - trace->last_time < 1000 / 5) { + ret = FALSE; + trace->last_time = now - 1; // assuming mm clock is active, so delta t == 0 is + // impossible; force delta t to be at least 1. 
+ } else { + trace->last_time = now; + ret = TRUE; + } + RingRemove(pdev, (RingItem *)trace); + RingAdd(pdev, ring, (RingItem *)trace); + return ret; + } + if (trace->link.next == ring) { + break; + } + trace = (UpdateTrace *)trace->link.next; + } + RingRemove(pdev, (RingItem *)trace); + trace->area = *dest; + trace->last_time = *pdev->mm_clock; + + if (IsUniqueSurf(src_surf, color_trans)) { + trace->hsurf = src_surf->hsurf; + } else { + trace->hsurf = NULL; + } + RingAdd(pdev, ring, (RingItem *)trace); + + return TRUE; +} + +static BOOL DoCopy(PDev *pdev, RECTL *area, CLIPOBJ *clip, SURFOBJ *src, RECTL *src_rect, + XLATEOBJ *color_trans, UINT16 rop_decriptor, SURFOBJ *mask, POINTL *mask_pos, + BOOL invers_mask, ULONG scale_mode) +{ + QXLDrawable *drawable; + BOOL use_cache; + + ASSERT(pdev, pdev && area && src_rect && src); + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + if (!(drawable = Drawable(pdev, QXL_DRAW_COPY, area, clip))) { + return FALSE; + } + + if (mask) { + drawable->effect = QXL_EFFECT_BLEND; + use_cache = TRUE; + } else { + drawable->effect = QXL_EFFECT_OPAQUE; + use_cache = StreamTest(pdev, src, color_trans, src_rect, area); + } + + drawable->u.copy.scale_mode = GdiScaleModeToQxl(scale_mode); + CopyRect(&drawable->u.copy.src_area, src_rect); + if (!QXLGetMask(pdev, drawable, &drawable->u.copy.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top) || + !GetBitmap(pdev, drawable, &drawable->u.copy.src_bitmap, src, &drawable->u.copy.src_area, + color_trans, use_cache)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->u.copy.rop_decriptor = rop_decriptor; + PushDrawable(pdev, drawable); + DEBUG_PRINT((pdev, 7, "%s: done\n", __FUNCTION__)); + return TRUE; +} + +static BOOL DoCopyBits(PDev *pdev, CLIPOBJ *clip, RECTL *area, POINTL *src_pos) +{ + QXLDrawable *drawable; + + ASSERT(pdev, pdev && area && src_pos); + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + if (area->left == src_pos->x && area->top == src_pos->y) { + DEBUG_PRINT((pdev, 0, "%s: NOP\n", __FUNCTION__)); + return TRUE; + } + + if (!(drawable = Drawable(pdev, QXL_COPY_BITS, area, clip))) { + return FALSE; + } + CopyPoint(&drawable->u.copy_bits.src_pos, src_pos); + drawable->effect = QXL_EFFECT_OPAQUE; + PushDrawable(pdev, drawable); + return TRUE; +} + +static BOOL DoBlend(PDev *pdev, RECTL *area, CLIPOBJ *clip, SURFOBJ *src, RECTL *src_rect, + XLATEOBJ *color_trans, ROP3Info *rop_info, SURFOBJ *mask, POINTL *mask_pos, + BOOL invers_mask, ULONG scale_mode) +{ + QXLDrawable *drawable; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev && area && src_rect && src); + + if (!(drawable = Drawable(pdev, QXL_DRAW_BLEND, area, clip))) { + return FALSE; + } + + drawable->u.blend.scale_mode = GdiScaleModeToQxl(scale_mode); + CopyRect(&drawable->u.blend.src_area, src_rect); + if (!QXLGetMask(pdev, drawable, &drawable->u.blend.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top) || + !GetBitmap(pdev, drawable, &drawable->u.blend.src_bitmap, src, &drawable->u.blend.src_area, + color_trans, TRUE)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->u.blend.rop_decriptor = rop_info->method_data; + drawable->effect = mask ? 
QXL_EFFECT_BLEND : rop_info->effect; + PushDrawable(pdev, drawable); + return TRUE; +} + +static BOOL DoBlackness(PDev *pdev, RECTL *area, CLIPOBJ *clip, SURFOBJ *mask, POINTL *mask_pos, + BOOL invers_mask) +{ + QXLDrawable *drawable; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev && area); + + if (!(drawable = Drawable(pdev, QXL_DRAW_BLACKNESS, area, clip))) { + return FALSE; + } + + if (!QXLGetMask(pdev, drawable, &drawable->u.blackness.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->effect = mask ? QXL_EFFECT_BLEND : QXL_EFFECT_OPAQUE; + PushDrawable(pdev, drawable); + return TRUE; +} + +static BOOL DoWhiteness(PDev *pdev, RECTL *area, CLIPOBJ *clip, SURFOBJ *mask, POINTL *mask_pos, + BOOL invers_mask) +{ + QXLDrawable *drawable; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev && area); + + if (!(drawable = Drawable(pdev, QXL_DRAW_WHITENESS, area, clip))) { + return FALSE; + } + + if (!QXLGetMask(pdev, drawable, &drawable->u.whiteness.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->effect = mask ? QXL_EFFECT_BLEND : QXL_EFFECT_OPAQUE; + PushDrawable(pdev, drawable); + return TRUE; +} + +static BOOL DoInvers(PDev *pdev, RECTL *area, CLIPOBJ *clip, SURFOBJ *mask, POINTL *mask_pos, + BOOL invers_mask) +{ + QXLDrawable *drawable; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev && area); + + if (!(drawable = Drawable(pdev, QXL_DRAW_INVERS, area, clip))) { + return FALSE; + } + + if (!QXLGetMask(pdev, drawable, &drawable->u.invers.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->effect = mask ? QXL_EFFECT_BLEND : QXL_EFFECT_REVERT_ON_DUP; + PushDrawable(pdev, drawable); + return TRUE; +} + +static BOOL DoROP3(PDev *pdev, RECTL *area, CLIPOBJ *clip, SURFOBJ *src, RECTL *src_rect, + XLATEOBJ *color_trans, BRUSHOBJ *brush, POINTL *brush_pos, UINT8 rop3, + SURFOBJ *mask, POINTL *mask_pos, BOOL invers_mask, ULONG scale_mode) +{ + QXLDrawable *drawable; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + ASSERT(pdev, pdev && area && brush && src_rect && src); + + if (!(drawable = Drawable(pdev, QXL_DRAW_ROP3, area, clip))) { + return FALSE; + } + + drawable->u.rop3.scale_mode = GdiScaleModeToQxl(scale_mode); + CopyRect(&drawable->u.rop3.src_area, src_rect); + if (!QXLGetBrush(pdev, drawable, &drawable->u.rop3.brush, brush, brush_pos) || + !QXLGetMask(pdev, drawable, &drawable->u.rop3.mask, mask, mask_pos, invers_mask, + area->right - area->left, area->bottom - area->top) || + !GetBitmap(pdev, drawable, &drawable->u.rop3.src_bitmap, src, &drawable->u.rop3.src_area, + color_trans, TRUE)) { + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->u.rop3.rop3 = rop3; + drawable->effect = mask ? 
QXL_EFFECT_BLEND : QXL_EFFECT_BLEND; //for now + PushDrawable(pdev, drawable); + return TRUE; +} + +static SURFOBJ *Copy16bppArea(PDev *pdev, RECTL *area) +{ + SIZEL size; + HSURF bitmap; + SURFOBJ *surf_obj; + UINT8 *dest_line; + UINT8 *dest_end_line; + LONG src_stride; + UINT8 *src_line; + + size.cx = area->right - area->left; + size.cy = area->bottom - area->top; + + if (!(bitmap = (HSURF)EngCreateBitmap(size, 0, BMF_16BPP, 0, NULL))) { + DEBUG_PRINT((pdev, 0, "%s: EngCreateBitmap failed\n", __FUNCTION__)); + return NULL; + } + + if (!EngAssociateSurface(bitmap, pdev->eng, 0)) { + DEBUG_PRINT((pdev, 0, "%s: EngAssociateSurface failed\n", __FUNCTION__)); + goto error; + } + + if (!(surf_obj = EngLockSurface(bitmap))) { + DEBUG_PRINT((pdev, 0, "%s: EngLockSurface failed\n", __FUNCTION__)); + goto error; + } + + dest_line = surf_obj->pvScan0; + dest_end_line = dest_line + surf_obj->lDelta * surf_obj->sizlBitmap.cy; + src_stride = pdev->draw_surf->lDelta; + src_line = (UINT8 *)pdev->draw_surf->pvScan0 + area->top * src_stride + (area->left << 2); + + for (; dest_line != dest_end_line; dest_line += surf_obj->lDelta, src_line += src_stride) { + UINT16 *dest = (UINT16 *)dest_line; + UINT16 *end = dest + surf_obj->sizlBitmap.cx; + UINT32 *src = (UINT32 *)src_line; + for (; dest < end; dest++, src++) { + *dest = ((*src & 0x00f80000) >> 9) | + ((*src & 0x0000f800) >> 6) | + ((*src & 0x000000f8) >> 3); + } + } + return surf_obj; + +error: + EngDeleteSurface(bitmap); + return NULL; + +} + +static BOOL BitBltFromDev(PDev *pdev, SURFOBJ *dest, SURFOBJ *mask, CLIPOBJ *clip, + XLATEOBJ *color_trans, RECTL *dest_rect, POINTL src_pos, + POINTL *mask_pos, BRUSHOBJ *brush, POINTL *brush_pos, ROP4 rop4) +{ + RECTL area; + SURFOBJ* surf_obj; + BOOL ret; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + area.top = MAX(0, src_pos.y); + area.bottom = MIN(src_pos.y + dest_rect->bottom - dest_rect->top, pdev->resolution.cy); + area.left = MAX(0, src_pos.x); + area.right = MIN(src_pos.x + dest_rect->right - dest_rect->left, pdev->resolution.cx); + + UpdateArea(pdev, &area); + + if (pdev->bitmap_format == BMF_16BPP) { + surf_obj = Copy16bppArea(pdev, &area); + if (!surf_obj) { + return FALSE; + } + src_pos.y = src_pos.y - area.top; + src_pos.x = src_pos.x - area.left; + } else { + surf_obj = pdev->draw_surf; + } + + if (rop4 == 0xcccc) { + ret = EngCopyBits(dest, surf_obj, clip, color_trans, dest_rect, &src_pos); + } else { + ret = EngBitBlt(dest, surf_obj, mask, clip, color_trans, dest_rect, &src_pos, + mask_pos, brush, brush_pos, rop4); + } + + if (pdev->bitmap_format == BMF_16BPP) { + HSURF surf = surf_obj->hsurf; + + EngUnlockSurface(surf_obj); + EngDeleteSurface(surf); + } + + return ret; +} + +BOOL _inline __DrvBitBlt(PDev *pdev, RECTL *dest_rect, CLIPOBJ *clip, SURFOBJ *src, RECTL *src_rect, + XLATEOBJ *color_trans, BRUSHOBJ *brush, POINTL *brush_pos, ULONG rop3, + SURFOBJ *mask, POINTL *mask_pos, BOOL invers_mask, ULONG scale_mode) +{ + ROP3Info *rop_info = &rops3[rop3]; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + switch (rop_info->method_type) { + case ROP3_TYPE_FILL: + return DoFill(pdev, dest_rect, clip, brush, brush_pos, rop_info, mask, mask_pos, + invers_mask); + case ROP3_TYPE_OPAQUE: + return DoOpaque(pdev, dest_rect, clip, src, src_rect, color_trans, brush, brush_pos, + rop_info->method_data, mask, mask_pos, invers_mask, scale_mode); + case ROP3_TYPE_COPY: + return DoCopy(pdev, dest_rect, clip, src, src_rect, color_trans, rop_info->method_data, + mask, mask_pos, invers_mask, 
scale_mode); + case ROP3_TYPE_BLEND: + return DoBlend(pdev, dest_rect, clip, src, src_rect, color_trans, rop_info, mask, mask_pos, + invers_mask, scale_mode); + case ROP3_TYPE_BLACKNESS: + return DoBlackness(pdev, dest_rect, clip, mask, mask_pos, invers_mask); + case ROP3_TYPE_WHITENESS: + return DoWhiteness(pdev, dest_rect, clip, mask, mask_pos, invers_mask); + case ROP3_TYPE_INVERS: + return DoInvers(pdev, dest_rect, clip, mask, mask_pos, invers_mask); + case ROP3_TYPE_ROP3: + return DoROP3(pdev, dest_rect, clip, src, src_rect, color_trans, brush, brush_pos, + (UINT8)rop_info->method_data, mask, mask_pos, invers_mask, scale_mode); + case ROP3_TYPE_NOP: + return TRUE; + default: + DEBUG_PRINT((pdev, 0, "%s: Error\n", __FUNCTION__)); + //EngSetError + return FALSE; + } +} + +#ifdef SUPPORT_BRUSH_AS_MASK +SURFOBJ *BrushToMask(PDev *pdev, BRUSHOBJ *brush) +{ + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + if (!brush || brush->iSolidColor != ~0) { + DEBUG_PRINT((pdev, 8, "%s: no mask, brush 0x%x color 0x%x\n", + __FUNCTION__, brush, brush ? brush->iSolidColor : 0)); + return NULL; + } + + if (!brush->pvRbrush && !BRUSHOBJ_pvGetRbrush(brush)) { + DEBUG_PRINT((pdev, 0, "%s: realize failed\n", __FUNCTION__)); + return NULL; + } + DEBUG_PRINT((pdev, 7, "%s: done 0x%x\n", __FUNCTION__, brush->pvRbrush)); + return NULL; +} +#endif + + +static _inline BOOL TestSrcBits(PDev *pdev, SURFOBJ *src, XLATEOBJ *color_trans) +{ + if (src) { + switch (src->iBitmapFormat) { + case BMF_32BPP: + case BMF_24BPP: + case BMF_16BPP: { + ULONG bit_fields[3]; + ULONG ents; + + if (!color_trans || (color_trans->flXlate & XO_TRIVIAL)) { + return TRUE; + } + + ents = XLATEOBJ_cGetPalette(color_trans, XO_SRCBITFIELDS, 3, bit_fields); + ASSERT(pdev, ents == 3); + switch (src->iBitmapFormat) { + case BMF_32BPP: + case BMF_24BPP: + if (bit_fields[0] != 0x00ff0000 || bit_fields[1] != 0x0000ff00 || + bit_fields[2] != 0x000000ff) { + DEBUG_PRINT((pdev, 11, "%s: BMF_32BPP/24BPP r 0x%x g 0x%x b 0x%x\n", + __FUNCTION__, + bit_fields[0], + bit_fields[1], + bit_fields[2])); + return FALSE; + } + break; + case BMF_16BPP: + if (bit_fields[0] != 0x7c00 || bit_fields[1] != 0x03e0 || + bit_fields[2] != 0x001f) { + DEBUG_PRINT((pdev, 11, "%s: BMF_16BPP r 0x%x g 0x%x b 0x%x\n", + __FUNCTION__, + bit_fields[0], + bit_fields[1], + bit_fields[2])); + return FALSE; + } + break; + } + return TRUE; + } + case BMF_8BPP: + case BMF_4BPP: + case BMF_1BPP: + return color_trans && (color_trans->flXlate & XO_TABLE); + default: + return FALSE; + } + } + return TRUE; +} + +static QXLRESULT BitBltCommon(PDev *pdev, SURFOBJ *dest, SURFOBJ *src, SURFOBJ *mask, CLIPOBJ *clip, + XLATEOBJ *color_trans, RECTL *dest_rect, RECTL *src_rect, + POINTL *mask_pos, BRUSHOBJ *brush, POINTL *brush_pos, ROP4 rop4, + ULONG scale_mode, COLORADJUSTMENT *color_adjust) +{ + ULONG rop3; + ULONG second_rop3; +#ifdef SUPPORT_BRUSH_AS_MASK + SURFOBJ *brush_mask = NULL; +#endif + QXLRESULT res; + + if (!PrepareBrush(brush)) { + return QXL_FAILED; + } + + if ((rop3 = rop4 & 0xff) == (second_rop3 = ((rop4 >> 8) & 0xff))) { + return __DrvBitBlt(pdev, dest_rect, clip, src, src_rect, color_trans, brush, brush_pos, + rop3, NULL, NULL, FALSE, scale_mode) ? QXL_SUCCESS : QXL_FAILED; + } + + if (!mask) { + DEBUG_PRINT((pdev, 5, "%s: no mask. rop4 is 0x%x\n", __FUNCTION__, rop4)); + return QXL_UNSUPPORTED; +#ifdef SUPPORT_BRUSH_AS_MASK + brush_mask = BrushToMask(pdev, brush); + if (!brush_mask) { + DEBUG_PRINT((pdev, 5, "%s: no mask. 
rop4 is 0x%x\n", __FUNCTION__, rop4)); + return QXL_UNSUPPORTED; + } + mask = brush_mask; + ASSERT(pdev, mask_pos); +#endif + } + DEBUG_PRINT((pdev, 5, "%s: mask, rop4 is 0x%x\n", __FUNCTION__, rop4)); + ASSERT(pdev, mask_pos); + res = (__DrvBitBlt(pdev, dest_rect, clip, src, src_rect, color_trans, brush, brush_pos, rop3, + mask, mask_pos, FALSE, scale_mode) && + __DrvBitBlt(pdev, dest_rect, clip, src, src_rect, color_trans, brush, brush_pos, + second_rop3, mask, mask_pos, TRUE, scale_mode)) ? QXL_SUCCESS : QXL_FAILED; +#ifdef SUPPORT_BRUSH_AS_MASK + if (brush_mask) { + //free brush_mask; + } +#endif + return res; +} + +static _inline void FixDestParams(PDev *pdev, SURFOBJ *dest, CLIPOBJ **in_clip, + RECTL *dest_rect, RECTL *area, POINTL **in_mask_pos, + POINTL *local_mask_pos) +{ + CLIPOBJ *clip; + + area->top = MAX(dest_rect->top, 0); + area->left = MAX(dest_rect->left, 0); + area->bottom = MIN(dest->sizlBitmap.cy, dest_rect->bottom); + area->right = MIN(dest->sizlBitmap.cx, dest_rect->right); + + clip = *in_clip; + if (clip) { + if (clip->iDComplexity == DC_TRIVIAL) { + clip = NULL; + } else { + SectRect(&clip->rclBounds, area, area); + if (clip->iDComplexity == DC_RECT) { + clip = NULL; + } + } + *in_clip = clip; + } + + if (in_mask_pos && *in_mask_pos) { + POINTL *mask_pos; + ASSERT(pdev, local_mask_pos); + mask_pos = *in_mask_pos; + local_mask_pos->x = mask_pos->x + (area->left - dest_rect->left); + local_mask_pos->y = mask_pos->y + (area->top - dest_rect->top); + *in_mask_pos = local_mask_pos; + } +} + +static QXLRESULT _BitBlt(PDev *pdev, SURFOBJ *dest, SURFOBJ *src, SURFOBJ *mask, CLIPOBJ *clip, + XLATEOBJ *color_trans, RECTL *dest_rect, POINTL *src_pos, + POINTL *mask_pos, BRUSHOBJ *brush, POINTL *brush_pos, ROP4 rop4) +{ + RECTL area; + POINTL local_mask_pos; + RECTL src_rect; + RECTL *src_rect_ptr; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + if (!TestSrcBits(pdev, src, color_trans)) { + DEBUG_PRINT((pdev, 1, "%s: test src failed\n", __FUNCTION__)); + + return EngBitBlt(dest, src, mask, clip, color_trans, dest_rect, src_pos, mask_pos, brush, + brush_pos, rop4) ? QXL_SUCCESS : QXL_FAILED; + } + +#if 0 + if (rop4 == 0xccaa) { + DEBUG_PRINT((pdev, 7, "%s: rop4 is 0xccaa, call EngBitBlt\n", __FUNCTION__)); + return QXL_UNSUPPORTED; + } +#endif + + ASSERT(pdev, dest->iType == STYPE_BITMAP || dest->hsurf == pdev->surf); + ASSERT(pdev, !src || src->iType == STYPE_BITMAP || src->hsurf == pdev->surf); + ASSERT(pdev, dest_rect && dest_rect->left < dest_rect->right && + dest_rect->top < dest_rect->bottom); + + FixDestParams(pdev, dest, &clip, dest_rect, &area, &mask_pos, &local_mask_pos); + if (IsEmptyRect(&area)) { + DEBUG_PRINT((pdev, 0, "%s: empty rect\n", __FUNCTION__)); + return QXL_SUCCESS; + } + + if (src && src_pos) { + POINTL local_pos; + + local_pos.x = src_pos->x + (area.left - dest_rect->left); + local_pos.y = src_pos->y + (area.top - dest_rect->top); + + if (dest->iType == STYPE_BITMAP) { + return BitBltFromDev(pdev, dest, mask, clip, color_trans, &area, local_pos, mask_pos, + brush, brush_pos, rop4) ? QXL_SUCCESS : QXL_FAILED; + } + + if (src->iType != STYPE_BITMAP && rop4 == 0xcccc) { //SRCCOPY no mask + return DoCopyBits(pdev, clip, &area, &local_pos) ? 
QXL_SUCCESS : QXL_FAILED; + } + + src_rect.left = local_pos.x; + src_rect.right = src_rect.left + (area.right - area.left); + src_rect.top = local_pos.y; + src_rect.bottom = src_rect.top + (area.bottom - area.top); + src_rect_ptr = &src_rect; + } else { + src_rect_ptr = NULL; + } + return BitBltCommon(pdev, dest, src, mask, clip, color_trans, &area, src_rect_ptr, + mask_pos, brush, brush_pos, rop4, COLORONCOLOR, NULL); +} + +BOOL APIENTRY DrvBitBlt(SURFOBJ *dest, SURFOBJ *src, SURFOBJ *mask, CLIPOBJ *clip, + XLATEOBJ *color_trans, RECTL *dest_rect, POINTL *src_pos, + POINTL *mask_pos, BRUSHOBJ *brush, POINTL *brush_pos, ROP4 rop4) +{ + PDev *pdev; + QXLRESULT res; + + if (dest->iType == STYPE_BITMAP) { + ASSERT(NULL, src && src->iType != STYPE_BITMAP && src->dhpdev && src_pos); + pdev = (PDev *)src->dhpdev; + } else { + ASSERT(NULL, dest->dhpdev); + pdev = (PDev *)dest->dhpdev; + CountCall(pdev, CALL_COUNTER_BIT_BLT); + } + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + if ((res = _BitBlt(pdev, dest, src, mask, clip, color_trans, dest_rect, src_pos, mask_pos, + brush, brush_pos, rop4))) { + if (res == QXL_UNSUPPORTED) { + DEBUG_PRINT((pdev, 4, "%s: call EngBitBlt\n", __FUNCTION__)); + return EngBitBlt(dest, src, mask, clip, color_trans, dest_rect, src_pos, mask_pos, + brush, brush_pos, rop4); + } + return FALSE; + + } + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); + return TRUE; +} + +BOOL APIENTRY DrvCopyBits(SURFOBJ *dest, SURFOBJ *src, CLIPOBJ *clip, + XLATEOBJ *color_trans, RECTL *dest_rect, POINTL *src_pos) +{ + PDev *pdev; + + if (dest->iType == STYPE_BITMAP) { + ASSERT(NULL, src && src->iType != STYPE_BITMAP && src->dhpdev && src_pos); + pdev = (PDev *)src->dhpdev; + } else { + ASSERT(NULL, dest->dhpdev); + pdev = (PDev *)dest->dhpdev; + CountCall(pdev, CALL_COUNTER_BIT_BLT); + } + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + + return _BitBlt(pdev, dest, src, NULL, clip, color_trans, dest_rect, src_pos, NULL, NULL, + NULL, /*SRCCOPY*/ 0xcccc) == QXL_SUCCESS ? TRUE : FALSE; +} + +static _inline BOOL TestStretchCondition(PDev *pdev, SURFOBJ *src, XLATEOBJ *color_trans, + COLORADJUSTMENT *color_adjust, + RECTL *dest_rect, RECTL *src_rect) +{ + int src_size; + int dest_size; + + if (color_adjust && (color_adjust->caFlags & CA_NEGATIVE)) { + return FALSE; + } + + if (IsCacheableSurf(src, color_trans)) { + return TRUE; + } + + src_size = (src_rect->right - src_rect->left) * (src_rect->bottom - src_rect->top); + dest_size = (dest_rect->right - dest_rect->left) * (dest_rect->bottom - dest_rect->top); + + return dest_size - src_size >= -(src_size >> 2); + +} + +static _inline unsigned int Scale(unsigned int val, unsigned int base_unit, unsigned int dest_unit) +{ + unsigned int div; + unsigned int mod; + + div = dest_unit * val / base_unit; + mod = dest_unit * val % base_unit; + return (mod >= (base_unit >> 1)) ? 
div + 1: div; +} + +static QXLRESULT _StretchBlt(PDev *pdev, SURFOBJ *dest, SURFOBJ *src, SURFOBJ *mask, CLIPOBJ *clip, + XLATEOBJ *color_trans, COLORADJUSTMENT *color_adjust, + POINTL *brush_pos, RECTL *dest_rect, RECTL *src_rect, + POINTL *mask_pos, ULONG mode, BRUSHOBJ *brush, DWORD rop4) +{ + RECTL area; + POINTL local_mask_pos; + RECTL local_dest_rect; + RECTL local_src_rect; + + DEBUG_PRINT((pdev, 6, "%s\n", __FUNCTION__)); + + ASSERT(pdev, src_rect && src_rect->left < src_rect->right && + src_rect->top < src_rect->bottom); + ASSERT(pdev, dest_rect); + + + if (dest_rect->left > dest_rect->right) { + local_dest_rect.left = dest_rect->right; + local_dest_rect.right = dest_rect->left; + } else { + local_dest_rect.left = dest_rect->left; + local_dest_rect.right = dest_rect->right; + } + + if (dest_rect->top > dest_rect->bottom) { + local_dest_rect.top = dest_rect->bottom; + local_dest_rect.bottom = dest_rect->top; + } else { + local_dest_rect.top = dest_rect->top; + local_dest_rect.bottom = dest_rect->bottom; + } + + if (!TestSrcBits(pdev, src, color_trans)) { + DEBUG_PRINT((pdev, 1, "%s: test src failed\n", __FUNCTION__)); + return QXL_UNSUPPORTED; + } + + if (!TestStretchCondition(pdev, src, color_trans, color_adjust, &local_dest_rect, src_rect)) { + DEBUG_PRINT((pdev, 1, "%s: stretch test failed\n", __FUNCTION__)); + return QXL_UNSUPPORTED; + } + + FixDestParams(pdev, dest, &clip, &local_dest_rect, &area, &mask_pos, &local_mask_pos); + if (IsEmptyRect(&area)) { + DEBUG_PRINT((pdev, 0, "%s: empty dest\n", __FUNCTION__)); + return QXL_SUCCESS; + } + //todo: use FixStreatchSrcArea + if (!SameRect(&local_dest_rect, &area)) { // possibly generate inconsistent rendering on dest + // edges + unsigned int w_dest; + unsigned int h_dest; + + if ((w_dest = local_dest_rect.right - local_dest_rect.left) != area.right - area.left) { + unsigned int w_src = src_rect->right - src_rect->left; + unsigned int delta; + + if ((delta = area.left - local_dest_rect.left)) { + local_src_rect.left = src_rect->left + Scale(delta, w_dest, w_src); + } else { + local_src_rect.left = src_rect->left; + } + + if ((delta = local_dest_rect.right - area.right)) { + local_src_rect.right = src_rect->right - Scale(delta, w_dest, w_src); + } else { + local_src_rect.right = src_rect->right; + } + + local_src_rect.left = MIN(local_src_rect.left, src->sizlBitmap.cx - 1); + local_src_rect.right = MAX(local_src_rect.right, local_src_rect.left + 1); + + } else { + local_src_rect.left = src_rect->left; + local_src_rect.right = src_rect->right; + } + + if ((h_dest = local_dest_rect.bottom - local_dest_rect.top) != area.bottom - area.top) { + unsigned int h_src = src_rect->bottom - src_rect->top; + unsigned int delta; + + if ((delta = area.top - local_dest_rect.top)) { + local_src_rect.top = src_rect->top + Scale(delta, h_dest, h_src); + } else { + local_src_rect.top = src_rect->top; + } + + if ((delta = local_dest_rect.bottom - area.bottom)) { + local_src_rect.bottom = src_rect->bottom - Scale(delta, h_dest, h_src); + } else { + local_src_rect.bottom = src_rect->bottom; + } + + local_src_rect.top = MIN(local_src_rect.top, src->sizlBitmap.cy - 1); + local_src_rect.bottom = MAX(local_src_rect.bottom, local_src_rect.top + 1); + + } else { + local_src_rect.top = src_rect->top; + local_src_rect.bottom = src_rect->bottom; + } + + src_rect = &local_src_rect; + } + + return BitBltCommon(pdev, dest, src, mask, clip, color_trans, &area, src_rect, mask_pos, + brush, brush_pos, rop4, mode, color_adjust); +} + +BOOL APIENTRY 
DrvStretchBltROP(SURFOBJ *dest, SURFOBJ *src, SURFOBJ *mask, CLIPOBJ *clip, + XLATEOBJ *color_trans, COLORADJUSTMENT *color_adjust, + POINTL *brush_pos, RECTL *dest_rect, RECTL *src_rect, + POINTL *mask_pos, ULONG mode, BRUSHOBJ *brush, DWORD rop4) +{ + PDev *pdev; + QXLRESULT res; + + if (!src || src->iType != STYPE_BITMAP) { + ASSERT(NULL, src->dhpdev); + pdev = (PDev *)src->dhpdev; + goto punt; + } + + ASSERT(NULL, dest && dest->iType != STYPE_BITMAP && dest->dhpdev); + pdev = (PDev *)dest->dhpdev; + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + CountCall(pdev, CALL_COUNTER_STRETCH_BLT_ROP); + + if ((res = _StretchBlt(pdev, dest, src, mask, clip, color_trans, + mode == HALFTONE ? color_adjust: NULL, brush_pos, + dest_rect, src_rect, mask_pos, mode, brush,rop4))) { + if (res == QXL_UNSUPPORTED) { + goto punt; + } + return FALSE; + } + return TRUE; + +punt: + return EngStretchBltROP(dest, src, mask, clip, color_trans, color_adjust, brush_pos, + dest_rect, src_rect, mask_pos, mode, brush, rop4); +} + +BOOL APIENTRY DrvStretchBlt(SURFOBJ *dest, SURFOBJ *src, SURFOBJ *mask, CLIPOBJ *clip, + XLATEOBJ *color_trans, COLORADJUSTMENT *color_adjust, + POINTL *halftone_brush_pos, RECTL *dest_rect, RECTL *src_rect, + POINTL *mask_pos, ULONG mode) +{ + PDev *pdev; + QXLRESULT res; + + ASSERT(NULL, src); + if (src->iType != STYPE_BITMAP) { + ASSERT(NULL, src->dhpdev); + pdev = (PDev *)src->dhpdev; + goto punt; + } + ASSERT(NULL, dest && dest->iType != STYPE_BITMAP && dest->dhpdev); + pdev = (PDev *)dest->dhpdev; + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + CountCall(pdev, CALL_COUNTER_STRETCH_BLT); + if ((res = _StretchBlt(pdev, dest, src, mask, clip, color_trans, + mode == HALFTONE ? color_adjust: NULL, NULL, dest_rect, + src_rect, mask_pos, mode, NULL, (mask) ? 
0xccaa: 0xcccc))) { + if (res == QXL_UNSUPPORTED) { + goto punt; + } + return FALSE; + } + return TRUE; + +punt: + return EngStretchBlt(dest, src, mask, clip, color_trans, color_adjust, halftone_brush_pos, + dest_rect, src_rect, mask_pos, mode); +} + +static BOOL FixStreatchSrcArea(const RECTL *orig_dest, const RECTL *dest, const RECTL *orig_src, + const SIZEL *bitmap_size, RECTL *src) +{ + unsigned int w_dest; + unsigned int h_dest; + + if (SameRect(orig_dest, dest)) { + return FALSE; + } + + // possibly generate inconsistent rendering on dest edges + + if ((w_dest = orig_dest->right - orig_dest->left) != dest->right - dest->left) { + unsigned int w_src = orig_src->right - orig_src->left; + unsigned int delta; + + if ((delta = dest->left - orig_dest->left)) { + src->left = orig_src->left + Scale(delta, w_dest, w_src); + } else { + src->left = orig_src->left; + } + + if ((delta = orig_dest->right - dest->right)) { + src->right = orig_src->right - Scale(delta, w_dest, w_src); + } else { + src->right = orig_src->right; + } + + src->left = MIN(src->left, bitmap_size->cx - 1); + src->right = MAX(src->right, src->left + 1); + + } else { + src->left = orig_src->left; + src->right = orig_src->right; + } + + if ((h_dest = orig_dest->bottom - orig_dest->top) != dest->bottom - dest->top) { + unsigned int h_src = orig_src->bottom - orig_src->top; + unsigned int delta; + + if ((delta = dest->top - orig_dest->top)) { + src->top = orig_src->top + Scale(delta, h_dest, h_src); + } else { + src->top = orig_src->top; + } + + if ((delta = orig_dest->bottom - dest->bottom)) { + src->bottom = orig_src->bottom - Scale(delta, h_dest, h_src); + } else { + src->bottom = orig_src->bottom; + } + + src->top = MIN(src->top, bitmap_size->cy - 1); + src->bottom = MAX(src->bottom, src->top + 1); + + } else { + src->top = orig_src->top; + src->bottom = orig_src->bottom; + } + + return TRUE; +} + +BOOL APIENTRY DrvAlphaBlend(SURFOBJ *dest, SURFOBJ *src, CLIPOBJ *clip, XLATEOBJ *color_trans, + RECTL *dest_rect, RECTL *src_rect, BLENDOBJ *bland) +{ + QXLDrawable *drawable; + PDev *pdev; + RECTL area; + RECTL local_src; + + ASSERT(NULL, src && dest); + if (src->iType != STYPE_BITMAP) { + ASSERT(NULL, src->dhpdev); + pdev = (PDev *)src->dhpdev; + goto punt; + } + ASSERT(NULL, dest->iType != STYPE_BITMAP && dest->dhpdev); + pdev = (PDev *)dest->dhpdev; + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + + ASSERT(pdev, src_rect && src_rect->left < src_rect->right && + src_rect->top < src_rect->bottom); + ASSERT(pdev, dest_rect && dest_rect->left < dest_rect->right && + dest_rect->top < dest_rect->bottom); + + CountCall(pdev, CALL_COUNTER_ALPHA_BLEND); + + if (bland->BlendFunction.BlendOp != AC_SRC_OVER) { + DEBUG_PRINT((pdev, 0, "%s: unexpected BlendOp\n", __FUNCTION__)); + goto punt; + } + + if (bland->BlendFunction.SourceConstantAlpha == 0) { + return TRUE; + } + + if (!TestSrcBits(pdev, src, color_trans)) { + DEBUG_PRINT((pdev, 1, "%s: test src failed\n", __FUNCTION__)); + goto punt; + } + + if (!TestStretchCondition(pdev, src, color_trans, NULL, dest_rect, src_rect)) { + DEBUG_PRINT((pdev, 1, "%s: stretch test failed\n", __FUNCTION__)); + goto punt; + } + + FixDestParams(pdev, dest, &clip, dest_rect, &area, NULL, NULL); + if (IsEmptyRect(&area)) { + DEBUG_PRINT((pdev, 0, "%s: empty dest\n", __FUNCTION__)); + return TRUE; + } + + if (!(drawable = Drawable(pdev, QXL_DRAW_ALPHA_BLEND, &area, clip))) { + DEBUG_PRINT((pdev, 0, "%s: Drawable failed\n", __FUNCTION__)); + return FALSE; + } + + if (FixStreatchSrcArea(dest_rect, &area, 
src_rect, &src->sizlBitmap, &local_src)) { + src_rect = &local_src; + } + + CopyRect(&drawable->u.alpha_blend.src_area, src_rect); + if (bland->BlendFunction.AlphaFormat == AC_SRC_ALPHA) { + ASSERT(pdev, src->iBitmapFormat == BMF_32BPP); + if (!QXLGetAlphaBitmap(pdev, drawable, &drawable->u.alpha_blend.src_bitmap, src, + &drawable->u.alpha_blend.src_area)) { + DEBUG_PRINT((pdev, 0, "%s: QXLGetAlphaBitmap failed\n", __FUNCTION__)); + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + } else { + if (!QXLGetBitmap(pdev, drawable, &drawable->u.alpha_blend.src_bitmap, src, + &drawable->u.alpha_blend.src_area, color_trans, NULL, TRUE)) { + DEBUG_PRINT((pdev, 0, "%s: QXLGetBitmap failed\n", __FUNCTION__)); + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + } + drawable->u.alpha_blend.alpha = bland->BlendFunction.SourceConstantAlpha; + drawable->effect = QXL_EFFECT_BLEND; + + PushDrawable(pdev, drawable); + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); + + return TRUE; + +punt: + return EngAlphaBlend(dest, src, clip, color_trans, dest_rect, src_rect, bland); +} + +BOOL APIENTRY DrvTransparentBlt(SURFOBJ *dest, SURFOBJ *src, CLIPOBJ *clip, XLATEOBJ *color_trans, + RECTL *dest_rect, RECTL *src_rect, ULONG trans_color, + ULONG reserved) +{ + QXLDrawable *drawable; + PDev *pdev; + RECTL area; + RECTL local_src; + + ASSERT(NULL, src && dest); + if (src->iType != STYPE_BITMAP) { + ASSERT(NULL, src->dhpdev); + pdev = (PDev *)src->dhpdev; + goto punt; + } + + ASSERT(NULL, dest->iType != STYPE_BITMAP && dest->dhpdev); + pdev = (PDev *)dest->dhpdev; + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + + ASSERT(pdev, src_rect && src_rect->left < src_rect->right && + src_rect->top < src_rect->bottom); + ASSERT(pdev, dest_rect && dest_rect->left < dest_rect->right && + dest_rect->top < dest_rect->bottom); + + CountCall(pdev, CALL_COUNTER_TRANSPARENT_BLT); + + if (!TestSrcBits(pdev, src, color_trans)) { + DEBUG_PRINT((pdev, 1, "%s: test src failed\n", __FUNCTION__)); + goto punt; + } + + if (!TestStretchCondition(pdev, src, color_trans, NULL, dest_rect, src_rect)) { + DEBUG_PRINT((pdev, 1, "%s: stretch test failed\n", __FUNCTION__)); + goto punt; + } + + FixDestParams(pdev, dest, &clip, dest_rect, &area, NULL, NULL); + if (IsEmptyRect(&area)) { + DEBUG_PRINT((pdev, 0, "%s: empty dest\n", __FUNCTION__)); + return TRUE; + } + + if (!(drawable = Drawable(pdev, QXL_DRAW_TRANSPARENT, &area, clip))) { + DEBUG_PRINT((pdev, 0, "%s: Drawable failed\n", __FUNCTION__)); + return FALSE; + } + + if (FixStreatchSrcArea(dest_rect, &area, src_rect, &src->sizlBitmap, &local_src)) { + src_rect = &local_src; + } + + CopyRect(&drawable->u.transparent.src_area, src_rect); + if (!QXLGetBitmap(pdev, drawable, &drawable->u.transparent.src_bitmap, src, + &drawable->u.transparent.src_area, color_trans, NULL, TRUE)) { + DEBUG_PRINT((pdev, 0, "%s: QXLGetBitmap failed\n", __FUNCTION__)); + ReleaseOutput(pdev, drawable->release_info.id); + return FALSE; + } + + drawable->u.transparent.src_color = trans_color; + switch (src->iBitmapFormat) { + case BMF_32BPP: + case BMF_24BPP: + drawable->u.transparent.true_color = trans_color; + break; + case BMF_16BPP: + drawable->u.transparent.true_color = _16bppTo32bpp(trans_color); + break; + case BMF_8BPP: + case BMF_4BPP: + case BMF_1BPP: + ASSERT(pdev, trans_color < color_trans->cEntries); + if (pdev->bitmap_format == BMF_32BPP) { + drawable->u.transparent.true_color = color_trans->pulXlate[trans_color]; + } else { + ASSERT(pdev, pdev->bitmap_format == 
BMF_16BPP); + drawable->u.transparent.true_color = _16bppTo32bpp(color_trans->pulXlate[trans_color]); + } + break; + } + + drawable->effect = QXL_EFFECT_BLEND; + PushDrawable(pdev, drawable); + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); + + return TRUE; + +punt: + return EngTransparentBlt(dest, src, clip, color_trans, dest_rect, src_rect, trans_color, + reserved); +} + diff --git a/display/rop.h b/display/rop.h new file mode 100644 index 0000000..7519e63 --- /dev/null +++ b/display/rop.h @@ -0,0 +1,35 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. +*/ + +#ifndef _H_ROP +#define _H_ROP + +#define ROP3_DEST (1 << 0) +#define ROP3_SRC (1 << 1) +#define ROP3_BRUSH (1 << 2) +#define ROP3_ALL (ROP3_DEST | ROP3_SRC | ROP3_BRUSH) + +typedef struct ROP3Info { + UINT8 effect; + UINT8 flags; + UINT32 method_type; + UINT16 method_data; +} ROP3Info; + +extern ROP3Info rops2[]; + +#endif diff --git a/display/sources b/display/sources new file mode 100644 index 0000000..f6c339b --- /dev/null +++ b/display/sources @@ -0,0 +1,34 @@ +TARGETNAME=qxldd
+TARGETPATH=obj
+TARGETTYPE=GDI_DRIVER
+
+!IFNDEF MSC_WARNING_LEVEL
+MSC_WARNING_LEVEL=/W3
+!ENDIF
+
+MSC_WARNING_LEVEL=$(MSC_WARNING_LEVEL) /WX
+
+INCLUDES=$(DDK_INC_PATH); ..\include; $(SPICE_COMMON_DIR);
+
+# todo: add ntoskrnl.lib for 2008 build
+
+TARGETLIBS = $(DDK_LIB_PATH)\ntstrsafe.lib
+
+!IFNDEF DEBUG
+MSC_OPTIMIZATION = /Ox
+!ENDIF
+
+C_DEFINES = $(C_DEFINES) /DQXLDD
+
+
+SOURCES=driver.c \
+ rop.c \
+ res.c \
+ text.c \
+ pointer.c \
+ brush.c \
+ mspace.c \
+ quic.c \
+ lookup3.c \
+ driver.rc
+
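
A note on the ROP4 convention that rop.c above relies on: BitBltCommon() takes the low byte of the 16-bit ROP4 as the ROP3 applied with the mask as given, and the high byte as the ROP3 applied with the mask inverted; when the two bytes are equal the mask is irrelevant and a single unmasked pass is issued (0xCCCC is the unmasked SRCCOPY case, and DrvStretchBlt passes 0xCCAA when a mask is present). The standalone sketch below only illustrates that byte split; split_rop4 is a hypothetical helper written for this note, not driver code.

#include <stdio.h>

/* Illustration only: mirrors the split BitBltCommon() performs on its ROP4.
 * The low byte is applied with the mask as given, the high byte with the
 * mask inverted; equal bytes mean one unmasked pass is enough. */
static void split_rop4(unsigned int rop4)
{
    unsigned int rop3 = rop4 & 0xff;               /* pass with invers_mask = FALSE */
    unsigned int second_rop3 = (rop4 >> 8) & 0xff; /* pass with invers_mask = TRUE  */

    if (rop3 == second_rop3) {
        printf("rop4 0x%04x -> one unmasked pass, rop3 0x%02x\n", rop4, rop3);
    } else {
        printf("rop4 0x%04x -> two masked passes, rop3 0x%02x and 0x%02x\n",
               rop4, rop3, second_rop3);
    }
}

int main(void)
{
    split_rop4(0xcccc); /* SRCCOPY in both bytes: no mask needed */
    split_rop4(0xccaa); /* low byte 0xAA is "D" (nop), high byte 0xCC is SRCCOPY */
    return 0;
}

For each byte, __DrvBitBlt() then looks up the rops3 table to decide whether the pass can be expressed as a fill, copy, blend, or full ROP3 drawable.
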
diff --git a/display/text.c b/display/text.c new file mode 100644 index 0000000..261c0c7 --- /dev/null +++ b/display/text.c @@ -0,0 +1,116 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. +*/ + +#include "os_dep.h" +#include "qxldd.h" +#include "utils.h" +#include "res.h" +#include "rop.h" + +BOOL APIENTRY DrvTextOut(SURFOBJ *surf, STROBJ *str, FONTOBJ *font, CLIPOBJ *clip, + RECTL *ignored, RECTL *opaque_rect, + BRUSHOBJ *fore_brush, BRUSHOBJ *back_brash, + POINTL *brushs_origin, MIX mix) +{ + QXLDrawable *drawable; + ROP3Info *fore_rop; + ROP3Info *back_rop; + PDev* pdev; + RECTL area; + + if (!(pdev = (PDev *)surf->dhpdev)) { + DEBUG_PRINT((NULL, 0, "%s: err no pdev\n", __FUNCTION__)); + return FALSE; + } + + CountCall(pdev, CALL_COUNTER_TEXT_OUT); + + DEBUG_PRINT((pdev, 3, "%s\n", __FUNCTION__)); + ASSERT(pdev, opaque_rect == NULL || + (opaque_rect->left < opaque_rect->right && opaque_rect->top < opaque_rect->bottom)); + ASSERT(pdev, surf && str && font && clip); + + if (opaque_rect) { + CopyRect(&area, opaque_rect); + } else { + CopyRect(&area, &str->rclBkGround); + } + + if (clip) { + if (clip->iDComplexity == DC_TRIVIAL) { + clip = NULL; + } else { + SectRect(&clip->rclBounds, &area, &area); + if (IsEmptyRect(&area)) { + DEBUG_PRINT((pdev, 1, "%s: empty rect after clip\n", __FUNCTION__)); + return TRUE; + } + } + } + + if (!(drawable = Drawable(pdev, QXL_DRAW_TEXT, &area, clip))) { + return FALSE; + } + + if (opaque_rect) { + ASSERT(pdev, back_brash && brushs_origin); + if (!QXLGetBrush(pdev, drawable, &drawable->u.text.back_brush, back_brash, brushs_origin)) { + goto error; + } + CopyRect(&drawable->u.text.back_area, &area); + drawable->u.text.back_mode = ROPD_OP_PUT; + drawable->effect = QXL_EFFECT_OPAQUE; + } else { + drawable->u.text.back_brush.type = BRUSH_TYPE_NONE; + RtlZeroMemory(&drawable->u.text.back_area, sizeof(drawable->u.text.back_area)); + drawable->u.text.back_mode = 0; + drawable->effect = QXL_EFFECT_BLEND; + } + + fore_rop = &rops2[(mix - 1) & 0x0f]; + back_rop = &rops2[((mix >> 8) - 1) & 0x0f]; + + if (!((fore_rop->flags | back_rop->flags) & ROP3_BRUSH)) { + drawable->u.stroke.brush.type = BRUSH_TYPE_NONE; + } else if (!QXLGetBrush(pdev, drawable, &drawable->u.text.fore_brush, fore_brush, + brushs_origin)) { + DEBUG_PRINT((pdev, 0, "%s: get brush failed\n", __FUNCTION__)); + goto error; + } + + if (fore_rop->method_data != back_rop->method_data && back_rop->method_data) { + DEBUG_PRINT((pdev, 0, "%s: ignoring back rop, fore %u back %u\n", + __FUNCTION__, + (UINT32)fore_rop->method_data, + (UINT32)back_rop->method_data)); + } + drawable->u.text.fore_mode = fore_rop->method_data; + + if (!QXLGetStr(pdev, drawable, &drawable->u.text.str, font, str)) { + DEBUG_PRINT((pdev, 0, "%s: get str failed\n", __FUNCTION__)); + goto error; + } + + PushDrawable(pdev, drawable); + DEBUG_PRINT((pdev, 4, "%s: done\n", __FUNCTION__)); + return TRUE; + +error: + 
ReleaseOutput(pdev, drawable->release_info.id); + DEBUG_PRINT((pdev, 4, "%s: error\n", __FUNCTION__)); + return FALSE; +} diff --git a/display/utils.h b/display/utils.h new file mode 100644 index 0000000..74f3476 --- /dev/null +++ b/display/utils.h @@ -0,0 +1,113 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. +*/ + +#ifndef _H_UTILS +#define _H_UTILS + +#define MIN(x, y) (((x) <= (y)) ? (x) : (y)) +#define MAX(x, y) (((x) >= (y)) ? (x) : (y)) +#define ALIGN(a, b) (((a) + ((b) - 1)) & ~((b) - 1)) + + +#define OFFSETOF(type, member) ((unsigned)&((type *)0)->member) +#define CONTAINEROF(ptr, type, member) \ + ((type *) ((UINT8 *)(ptr) - OFFSETOF(type, member))) + +static __inline BOOL IsEmptyRect(RECTL *r) +{ + return r->left >= r->right || r->top >= r->bottom; +} + +static __inline void SectRect(RECTL *r1, RECTL *r2, RECTL *dest) +{ + dest->top = MAX(r1->top, r2->top); + dest->bottom = MAX(MIN(r1->bottom, r2->bottom), dest->top); + + dest->left = MAX(r1->left, r2->left); + dest->right = MAX(MIN(r1->right, r2->right), dest->left); +} + +static _inline LONG RectSize(RECTL *rect) +{ + return (rect->right - rect->left) * (rect->bottom - rect->top); +} + +#define SameRect(r1, r2) ((r1)->left == (r2)->left && (r1)->right == (r2)->right && \ + (r1)->top == (r2)->top && (r1)->bottom == (r2)->bottom) + +#define CopyRect(dest, src) \ + (dest)->top = (src)->top; \ + (dest)->left = (src)->left; \ + (dest)->bottom = (src)->bottom; \ + (dest)->right = (src)->right; + +#define CopyPoint(dest, src) \ + (dest)->x = (src)->x; \ + (dest)->y = (src)->y; + +static __inline void FXToRect(RECTL *dest, RECTFX *src) +{ + dest->left = src->xLeft >> 4; + dest->top = src->yTop >> 4; + dest->right = ALIGN(src->xRight, 16) >> 4; + dest->bottom = ALIGN(src->yBottom, 16) >> 4; + +} + +static _inline int test_bit(void* addr, int bit) +{ + return !!(((UINT32 *)addr)[bit >> 5] & (1 << (bit & 0x1f))); +} + +static _inline int test_bit_be(void* addr, int bit) +{ + return !!(((UINT8 *)addr)[bit >> 3] & (0x80 >> (bit & 0x07))); +} + +static _inline BOOL PrepareBrush(BRUSHOBJ *brush) +{ + if (!brush || brush->iSolidColor != ~0 || brush->pvRbrush) { + return TRUE; + } + return BRUSHOBJ_pvGetRbrush(brush) != NULL; +} + +static _inline BOOL IsCacheableSurf(SURFOBJ *surf, XLATEOBJ *color_trans) +{ + return surf->iUniq && !(surf->fjBitmap & BMF_DONTCACHE) && + (!color_trans || color_trans->iUniq); +} + +static _inline UINT32 _16bppTo32bpp(UINT32 color) +{ + UINT32 ret; + + ret = ((color & 0x001f) << 3) | ((color & 0x001c) >> 2); + ret |= ((color & 0x03e0) << 6) | ((color & 0x0380) << 1); + ret |= ((color & 0x7c00) << 9) | ((color & 0x7000) << 4); + + return ret; +} + +static _inline BOOL IsUniqueSurf(SURFOBJ *surf, XLATEOBJ *color_trans) +{ + int pallette = color_trans && (color_trans->flXlate & XO_TABLE); + return surf->iUniq && (surf->fjBitmap & BMF_DONTCACHE) && (!pallette || color_trans->iUniq); +} + +#endif + |
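
The 16bpp helpers in utils.h above assume the x1r5g5b5 layout that TestSrcBits() checks for (masks 0x7C00/0x03E0/0x001F): _16bppTo32bpp() shifts each 5-bit channel into the top of its 8-bit slot and replicates its three high bits into the low bits. A minimal standalone check of that expansion follows; expand_16bpp is a local copy made for illustration, not the driver function.

#include <assert.h>
#include <stdio.h>

/* Local copy of the utils.h expansion, for illustration only. */
static unsigned int expand_16bpp(unsigned int color)
{
    unsigned int ret;

    ret  = ((color & 0x001f) << 3) | ((color & 0x001c) >> 2); /* blue  */
    ret |= ((color & 0x03e0) << 6) | ((color & 0x0380) << 1); /* green */
    ret |= ((color & 0x7c00) << 9) | ((color & 0x7000) << 4); /* red   */
    return ret;
}

int main(void)
{
    assert(expand_16bpp(0x0000) == 0x00000000); /* black stays black         */
    assert(expand_16bpp(0x7c00) == 0x00ff0000); /* max red -> full 8-bit red */
    assert(expand_16bpp(0x7fff) == 0x00ffffff); /* white -> 0xFFFFFF         */
    printf("0x7fff -> 0x%06x\n", expand_16bpp(0x7fff));
    return 0;
}

Replicating the high bits instead of zero-filling is what keeps 5-bit white at 0xFFFFFF rather than 0xF8F8F8.
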