-rw-r--r--  drivers/gpu/drm/Kconfig             |  11
-rw-r--r--  drivers/gpu/drm/Makefile            |   1
-rw-r--r--  drivers/gpu/drm/qxl/lookup3.c       | 765
-rw-r--r--  drivers/gpu/drm/qxl/lookup3.h       |   8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c       | 335
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c   | 439
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dev.h       | 782
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c   | 744
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c      | 468
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c       | 129
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h       | 499
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c      |  62
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c        | 784
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c       | 157
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c     | 313
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c       |  70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c       | 521
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c    | 225
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h    |  77
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c       | 449
-rw-r--r--  include/drm/drm_pciids.h            |   5
21 files changed, 6844 insertions(+), 0 deletions(-)
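The qxl_cmd.c hunk in the patch below adds the guest-side ring helpers (qxl_ring_create(), qxl_ring_push(), qxl_ring_pop()) that drive the command, cursor and release rings declared in qxl_dev.h. As a rough illustration of how the rest of the driver is expected to use them, the sketch that follows pushes one draw command onto the command ring. It is not part of the patch; the helper name push_one_draw_command() and the assumption that the ring was created once at init time are illustrative only.

/* Illustrative sketch, not part of the patch: drive the command ring
 * added by qxl_cmd.c with one QXL_CMD_DRAW entry.  The ring is assumed
 * to have been created once at init time, roughly like this:
 *
 *   cmd_ring = qxl_ring_create(&qdev->ram_header->cmd_ring_hdr,
 *                              sizeof(struct qxl_command),
 *                              QXL_COMMAND_RING_SIZE,
 *                              qdev->io_base + QXL_IO_NOTIFY_CMD);
 */
static void push_one_draw_command(struct qxl_ring *cmd_ring, QXLPHYSICAL drawable)
{
	struct qxl_command cmd = {
		.data = drawable,	/* guest-physical address of a struct qxl_drawable */
		.type = QXL_CMD_DRAW,
	};

	/* Blocks while the ring is full, then notifies the device via the
	 * prod_notify I/O port that was passed to qxl_ring_create(). */
	qxl_ring_push(cmd_ring, &cmd);
}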
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 90e28081712..5f47fbdfde8 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -193,3 +193,14 @@ source "drivers/gpu/drm/ast/Kconfig" source "drivers/gpu/drm/mgag200/Kconfig" source "drivers/gpu/drm/cirrus/Kconfig" + +config DRM_QXL + tristate "QXL virtual GPU" + depends on DRM && PCI + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + select DRM_KMS_HELPER + select DRM_TTM + help + QXL virtual GPU for Spice virtualization desktop integration. diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index f65f65ed0dd..a730f34520d 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -45,4 +45,5 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ +obj-$(CONFIG_DRM_QXL) += qxl/ obj-y += i2c/ diff --git a/drivers/gpu/drm/qxl/lookup3.c b/drivers/gpu/drm/qxl/lookup3.c new file mode 100644 index 00000000000..6f9bd627fd7 --- /dev/null +++ b/drivers/gpu/drm/qxl/lookup3.c @@ -0,0 +1,765 @@ +/* +------------------------------------------------------------------------------- +lookup3.c, by Bob Jenkins, May 2006, Public Domain. + +These are functions for producing 32-bit hashes for hash table lookup. +hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() +are externally useful functions. Routines to test the hash are included +if SELF_TEST is defined. You can use this free for any purpose. It's in +the public domain. It has no warranty. + +You probably want to use hashlittle(). hashlittle() and hashbig() +hash byte arrays. hashlittle() is is faster than hashbig() on +little-endian machines. Intel and AMD are little-endian machines. +On second thought, you probably want hashlittle2(), which is identical to +hashlittle() except it returns two 32-bit hashes for the price of one. +You could implement hashbig2() if you wanted but I haven't bothered here. + +If you want to find a hash of, say, exactly 7 integers, do + a = i1; b = i2; c = i3; + mix(a,b,c); + a += i4; b += i5; c += i6; + mix(a,b,c); + a += i7; + final(a,b,c); +then use c as the hash value. If you have a variable length array of +4-byte integers to hash, use hashword(). If you have a byte array (like +a character string), use hashlittle(). If you have several byte arrays, or +a mix of things, see the comments above hashlittle(). + +Why is this so big? I read 12 bytes at a time into 3 4-byte integers, +then mix those integers. This is fast (you can do a lot more thorough +mixing with 12*3 instructions on 3 integers than you can with 3 instructions +on 1 byte), but shoehorning those bytes into integers efficiently is messy. +------------------------------------------------------------------------------- +*/ + +#include <linux/types.h> +#include "lookup3.h" + +/* + * My best guess at if you are big-endian or little-endian. This may + * need adjustment. 
+ */ +#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ + __BYTE_ORDER == __LITTLE_ENDIAN) || \ + (defined(i386) || defined(__i386__) || defined(__i486__) || \ + defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL)) +# define HASH_LITTLE_ENDIAN 1 +# define HASH_BIG_ENDIAN 0 +#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ + __BYTE_ORDER == __BIG_ENDIAN) || \ + (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel)) +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 1 +#else +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 0 +#endif + +#define hashsize(n) ((uint32_t)1<<(n)) +#define hashmask(n) (hashsize(n)-1) +#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) + +/* +------------------------------------------------------------------------------- +mix -- mix 3 32-bit values reversibly. + +This is reversible, so any information in (a,b,c) before mix() is +still in (a,b,c) after mix(). + +If four pairs of (a,b,c) inputs are run through mix(), or through +mix() in reverse, there are at least 32 bits of the output that +are sometimes the same for one pair and different for another pair. +This was tested for: +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that +satisfy this are + 4 6 8 16 19 4 + 9 15 3 18 27 15 + 14 9 3 7 17 3 +Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing +for "differ" defined as + with a one-bit base and a two-bit delta. I +used http://burtleburtle.net/bob/hash/avalanche.html to choose +the operations, constants, and arrangements of the variables. + +This does not achieve avalanche. There are input bits of (a,b,c) +that fail to affect some output bits of (a,b,c), especially of a. The +most thoroughly mixed value is c, but it doesn't really even achieve +avalanche in c. + +This allows some parallelism. Read-after-writes are good at doubling +the number of bits affected, so the goal of mixing pulls in the opposite +direction as the goal of parallelism. I did what I could. Rotates +seem to cost as much as shifts on every machine I could lay my hands +on, and rotates are much kinder to the top and bottom bits, so I used +rotates. +------------------------------------------------------------------------------- +*/ +#define mix(a,b,c) \ +{ \ + a -= c; a ^= rot(c, 4); c += b; \ + b -= a; b ^= rot(a, 6); a += c; \ + c -= b; c ^= rot(b, 8); b += a; \ + a -= c; a ^= rot(c,16); c += b; \ + b -= a; b ^= rot(a,19); a += c; \ + c -= b; c ^= rot(b, 4); b += a; \ +} + +/* +------------------------------------------------------------------------------- +final -- final mixing of 3 32-bit values (a,b,c) into c + +Pairs of (a,b,c) values differing in only a few bits will usually +produce values of c that look totally different. This was tested for +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. 
For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +These constants passed: + 14 11 25 16 4 14 24 + 12 14 25 16 4 14 24 +and these came close: + 4 8 15 26 3 22 24 + 10 8 15 26 3 22 24 + 11 8 15 26 3 22 24 +------------------------------------------------------------------------------- +*/ +#define final(a,b,c) \ +{ \ + c ^= b; c -= rot(b,14); \ + a ^= c; a -= rot(c,11); \ + b ^= a; b -= rot(a,25); \ + c ^= b; c -= rot(b,16); \ + a ^= c; a -= rot(c,4); \ + b ^= a; b -= rot(a,14); \ + c ^= b; c -= rot(b,24); \ +} + +/* +-------------------------------------------------------------------- + This works on all machines. To be useful, it requires + -- that the key be an array of uint32_t's, and + -- that the length be the number of uint32_t's in the key + + The function hashword() is identical to hashlittle() on little-endian + machines, and identical to hashbig() on big-endian machines, + except that the length has to be measured in uint32_ts rather than in + bytes. hashlittle() is more complicated than hashword() only because + hashlittle() has to dance around fitting the key bytes into registers. +-------------------------------------------------------------------- +*/ +uint32_t hashword( +const uint32_t *k, /* the key, an array of uint32_t values */ +size_t length, /* the length of the key, in uint32_ts */ +uint32_t initval) /* the previous hash, or an arbitrary value */ +{ + uint32_t a,b,c; + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval; + + /*------------------------------------------------- handle most of the key */ + while (length > 3) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 3; + k += 3; + } + + /*------------------------------------------- handle the last 3 uint32_t's */ + switch(length) /* all the case statements fall through */ + { + case 3 : c+=k[2]; + case 2 : b+=k[1]; + case 1 : a+=k[0]; + final(a,b,c); + case 0: /* case 0: nothing left to add */ + break; + } + /*------------------------------------------------------ report the result */ + return c; +} + + +/* +-------------------------------------------------------------------- +hashword2() -- same as hashword(), but take two seeds and return two +32-bit values. pc and pb must both be nonnull, and *pc and *pb must +both be initialized with seeds. If you pass in (*pb)==0, the output +(*pc) will be the same as the return value from hashword(). 
+-------------------------------------------------------------------- +*/ +void hashword2 ( +const uint32_t *k, /* the key, an array of uint32_t values */ +size_t length, /* the length of the key, in uint32_ts */ +uint32_t *pc, /* IN: seed OUT: primary hash value */ +uint32_t *pb) /* IN: more seed OUT: secondary hash value */ +{ + uint32_t a,b,c; + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)(length<<2)) + *pc; + c += *pb; + + /*------------------------------------------------- handle most of the key */ + while (length > 3) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 3; + k += 3; + } + + /*------------------------------------------- handle the last 3 uint32_t's */ + switch(length) /* all the case statements fall through */ + { + case 3 : c+=k[2]; + case 2 : b+=k[1]; + case 1 : a+=k[0]; + final(a,b,c); + case 0: /* case 0: nothing left to add */ + break; + } + /*------------------------------------------------------ report the result */ + *pc=c; *pb=b; +} + + +/* +------------------------------------------------------------------------------- +hashlittle() -- hash a variable-length key into a 32-bit value + k : the key (the unaligned variable-length array of bytes) + length : the length of the key, counting by bytes + initval : can be any 4-byte value +Returns a 32-bit value. Every bit of the key affects every bit of +the return value. Two keys differing by one or two bits will have +totally different hash values. + +The best hash table sizes are powers of 2. There is no need to do +mod a prime (mod is sooo slow!). If you need less than 32 bits, +use a bitmask. For example, if you need only 10 bits, do + h = (h & hashmask(10)); +In which case, the hash table should have hashsize(10) elements. + +If you are hashing n strings (uint8_t **)k, do it like this: + for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h); + +By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this +code any way you wish, private, educational, or commercial. It's free. + +Use for hash table lookup, or anything where one collision in 2^^32 is +acceptable. Do NOT use for cryptographic purposes. +------------------------------------------------------------------------------- +*/ + +uint32_t hashlittle( const void *key, size_t length, uint32_t initval) +{ + uint32_t a,b,c; /* internal state */ + union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + initval; + + u.ptr = key; + if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ +#ifdef VALGRIND + const uint8_t *k8; +#endif + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). 
+ */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : return c; + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : return c; /* zero length requires no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; + case 11: c+=((uint32_t)k[10])<<16; + case 10: c+=((uint32_t)k[9])<<8; + case 9 : c+=k[8]; + case 8 : b+=((uint32_t)k[7])<<24; + case 7 : b+=((uint32_t)k[6])<<16; + case 6 : 
b+=((uint32_t)k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t)k[3])<<24; + case 3 : a+=((uint32_t)k[2])<<16; + case 2 : a+=((uint32_t)k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : return c; + } + } + + final(a,b,c); + return c; +} + + +/* + * hashlittle2: return 2 32-bit hash values + * + * This is identical to hashlittle(), except it returns two 32-bit hash + * values instead of just one. This is good enough for hash table + * lookup with 2^^64 buckets, or if you want a second hash if you're not + * happy with the first, or if you want a probably-unique 64-bit ID for + * the key. *pc is better mixed than *pb, so use *pc first. If you want + * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)". + */ +void hashlittle2( + const void *key, /* the key to hash */ + size_t length, /* length of the key */ + uint32_t *pc, /* IN: primary initval, OUT: primary hash */ + uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */ +{ + uint32_t a,b,c; /* internal state */ + union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc; + c += *pb; + + u.ptr = key; + if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ +#ifdef VALGRIND + const uint8_t *k8; +#endif + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). 
+ */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; + case 11: c+=((uint32_t)k[10])<<16; + case 10: c+=((uint32_t)k[9])<<8; + case 9 : c+=k[8]; + case 8 : 
b+=((uint32_t)k[7])<<24; + case 7 : b+=((uint32_t)k[6])<<16; + case 6 : b+=((uint32_t)k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t)k[3])<<24; + case 3 : a+=((uint32_t)k[2])<<16; + case 2 : a+=((uint32_t)k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + } + + final(a,b,c); + *pc=c; *pb=b; +} + + + +/* + * hashbig(): + * This is the same as hashword() on big-endian machines. It is different + * from hashlittle() on all machines. hashbig() takes advantage of + * big-endian byte ordering. + */ +uint32_t hashbig( const void *key, size_t length, uint32_t initval) +{ + uint32_t a,b,c; + union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + initval; + + u.ptr = key; + if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ +#ifdef VALGRIND + const uint8_t *k8; +#endif + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]<<8" actually reads beyond the end of the string, but + * then shifts out the part it's not allowed to read. Because the + * string is aligned, the illegal read is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). + */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; + case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; + case 5 : b+=k[1]&0xff000000; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff00; break; + case 2 : a+=k[0]&0xffff0000; break; + case 1 : a+=k[0]&0xff000000; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ + case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ + case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ + case 1 : a+=((uint32_t)k8[0])<<24; break; + case 0 : return c; + } + +#endif /* !VALGRIND */ + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += ((uint32_t)k[0])<<24; + a += ((uint32_t)k[1])<<16; + a += ((uint32_t)k[2])<<8; + a += ((uint32_t)k[3]); + b += ((uint32_t)k[4])<<24; + b += 
((uint32_t)k[5])<<16; + b += ((uint32_t)k[6])<<8; + b += ((uint32_t)k[7]); + c += ((uint32_t)k[8])<<24; + c += ((uint32_t)k[9])<<16; + c += ((uint32_t)k[10])<<8; + c += ((uint32_t)k[11]); + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[11]; + case 11: c+=((uint32_t)k[10])<<8; + case 10: c+=((uint32_t)k[9])<<16; + case 9 : c+=((uint32_t)k[8])<<24; + case 8 : b+=k[7]; + case 7 : b+=((uint32_t)k[6])<<8; + case 6 : b+=((uint32_t)k[5])<<16; + case 5 : b+=((uint32_t)k[4])<<24; + case 4 : a+=k[3]; + case 3 : a+=((uint32_t)k[2])<<8; + case 2 : a+=((uint32_t)k[1])<<16; + case 1 : a+=((uint32_t)k[0])<<24; + break; + case 0 : return c; + } + } + + final(a,b,c); + return c; +} + diff --git a/drivers/gpu/drm/qxl/lookup3.h b/drivers/gpu/drm/qxl/lookup3.h new file mode 100644 index 00000000000..4dee9f5bf82 --- /dev/null +++ b/drivers/gpu/drm/qxl/lookup3.h @@ -0,0 +1,8 @@ +#ifndef __LOOKUP3_H +#define __LOOKUP3_H + +#include <linux/types.h> + +uint32_t hashlittle( const void *key, size_t length, u32 initval); + +#endif diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c new file mode 100644 index 00000000000..cf96d5ccd8d --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -0,0 +1,335 @@ +/* QXL cmd/ring handling */ + +#include "qxl_drv.h" +#include "qxl_object.h" + +struct ring { + struct qxl_ring_header header; + uint8_t elements[0]; +}; + +struct qxl_ring { + volatile struct ring *ring; + int element_size; + int n_elements; + int prod_notify; +}; + +void qxl_ring_free(struct qxl_ring *ring) +{ + kfree(ring); +} + +struct qxl_ring * +qxl_ring_create(struct qxl_ring_header *header, + int element_size, + int n_elements, + int prod_notify) +{ + struct qxl_ring *ring; + + ring = kmalloc (sizeof(*ring), GFP_KERNEL); + if (!ring) + return NULL; + + ring->ring = (volatile struct ring *)header; + ring->element_size = element_size; + ring->n_elements = n_elements; + ring->prod_notify = prod_notify; + + return ring; +} + +void qxl_ring_push(struct qxl_ring *ring, + const void *new_elt) +{ + volatile struct qxl_ring_header *header = &(ring->ring->header); + volatile uint8_t *elt; + int idx; + + // TODO - wait on interrupt + while (header->prod - header->cons == header->num_items) { + header->notify_on_cons = header->cons + 1; + mb(); + } + + idx = header->prod & (ring->n_elements - 1); + elt = ring->ring->elements + idx * ring->element_size; + + memcpy((void *)elt, new_elt, ring->element_size); + + header->prod++; + + mb(); + + if (header->prod == header->notify_on_prod) + outb (0, ring->prod_notify); +} + +bool qxl_ring_pop (struct qxl_ring *ring, + void *element) +{ + volatile struct qxl_ring_header *header = &(ring->ring->header); + volatile uint8_t *ring_elt; + int idx; + + if (header->cons == header->prod) + return false; + + idx = header->cons & (ring->n_elements - 1); + ring_elt = ring->ring->elements + idx * ring->element_size; + + memcpy (element, (void *)ring_elt, ring->element_size); + + header->cons++; + + return true; +} + +void qxl_ring_wait_idle (struct qxl_ring *ring) +{ + // TODO: wait on interrupt + while (ring->ring->header.cons != ring->ring->header.prod) + { + msleep (1); + mb(); + } +} + +void qxl_bo_free(struct qxl_bo *bo) +{ + int ret; + ret = qxl_bo_reserve(bo, false); + if (!ret) { + qxl_bo_kunmap(bo); + qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + } + drm_gem_object_release(&bo->gem_base); + qxl_bo_unref(&bo); 
+} + +struct drm_qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, uint64_t id) +{ + struct drm_qxl_release *release; + + release = idr_find(&qdev->release_idr, id); + if (!release) { + DRM_ERROR("failed to find id in release_idr\n"); + return NULL; + } + if (release->bo_count < 1) { + DRM_ERROR("read a released resource with 0 bos\n"); + return NULL; + } + //QXL_INFO(qdev, "got release %d bos\n", release->bo_count); + return release; +} + +int qxl_garbage_collect(struct qxl_device *qdev) +{ + struct drm_qxl_release *release; + uint64_t id; + int i = 0; + union qxl_release_info *info; + + mutex_lock(&qdev->release_idr_mutex); + while (qxl_ring_pop (qdev->release_ring, &id)) { + QXL_INFO(qdev, "popped %lld\n", id); + while (id) { + release = qxl_release_from_id_locked(qdev, id); + if (release == NULL) { + break; + } + info = (union qxl_release_info*)release->bos[0]->kptr; + //struct qxl_cursor_cmd *cmd = (struct qxl_cursor_cmd *)info; + QXL_INFO(qdev, "popped %lld, next %lld\n", id, info->next); + + switch (release->type) { + case QXL_RELEASE_DRAWABLE: + case QXL_RELEASE_SURFACE_CMD: + case QXL_RELEASE_CURSOR_CMD: + break; + default: + DRM_ERROR("unexpected release type\n"); + break; + } +#if 0 + if (is_cursor && cmd->type == QXL_CURSOR_SET) { + struct qxl_cursor *cursor = (void *)qxl_virtual_address + (qdev, (void *)cmd->u.set.shape); + qxl_free(qdev->mem, cursor); + } else if (!is_cursor && drawable->type == QXL_DRAW_COPY) { + struct qxl_image *image = qxl_virtual_address(qdev, + (void *)drawable->u.copy.src_bitmap); + qxl_image_destroy (qdev, image); + } +#endif + id = info->next; + qxl_release_free_locked(qdev, release); + ++i; + } + } + mutex_unlock(&qdev->release_idr_mutex); + QXL_INFO(qdev, "%s: %lld\n", __FUNCTION__, i); + + return i; +} + +/* create and pin bo */ +static struct qxl_bo *qxl_create_pinned_bo(struct qxl_device *qdev, + unsigned long size) +{ + struct qxl_bo *bo; + int ret; + + ret = qxl_bo_create(qdev, size, false /* not kernel - device */, + QXL_GEM_DOMAIN_VRAM, &bo); + if (ret) { + DRM_ERROR("failed to allocate VRAM BO\n"); + return NULL; + } + ret = qxl_bo_reserve(bo, false); + if (unlikely(ret != 0)) + goto out_unref; + + ret = qxl_bo_pin(bo, QXL_GEM_DOMAIN_VRAM, NULL); + if (ret) { + DRM_ERROR("failed to pin VRAM BO %d\n", ret); + goto out_unref; + } + + ret = qxl_bo_kmap(bo, NULL); + qxl_bo_unreserve(bo); /* this memory will be reserved via mmap */ + //qxl_io_log(qdev, "%s: gpu offset 0x%llX, kptr 0x%p\n", __func__, + // qxl_bo_gpu_offset(bo), bo->kptr); + if (ret) + goto out_unref; + return bo; +out_unref: + qxl_bo_unref(&bo); + return NULL; +} + +void *qxl_allocnf(struct qxl_device *qdev, unsigned long size, + struct drm_qxl_release *release) +{ + struct qxl_bo *bo; + + qxl_garbage_collect(qdev); + bo = qxl_create_pinned_bo(qdev, size); + qxl_release_add_res(qdev, release, bo); + //QXL_INFO(qdev, "allocated %lu bytes (really TODO)\n", size); + return bo->kptr; +} + +static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port) +{ + int irq_num = atomic_read(&qdev->irq_received_io_cmd); + long addr = qdev->io_base + port; + + mutex_lock(&qdev->async_io_mutex); + outb(val, addr); + wait_event_interruptible(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num); + mutex_unlock(&qdev->async_io_mutex); +} + +void qxl_io_update_area(struct qxl_device *qdev, uint32_t surface_id, + const struct qxl_rect *area) +{ + qdev->ram_header->update_area = *area; + qdev->ram_header->update_surface = surface_id; + 
wait_for_io_cmd(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC); +} + +void qxl_io_notify_oom(struct qxl_device *qdev) +{ + // TODO: async (add lock, implement interrupt, wait on lock) + outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM); +} + +void qxl_io_update_screen(struct qxl_device *qdev) +{ + struct qxl_rect area; + u32 height, width; + + height = qdev->fbdev_qfb->base.height; + width = qdev->fbdev_qfb->base.width; + QXL_INFO(qdev, "%s: bad bad bad %dx%d\n", __func__, + width, height); + area.left = area.top = 0; + area.right = width; + area.bottom = height; + + qxl_io_update_area(qdev, 0, &area); +} + +void qxl_io_destroy_primary(struct qxl_device *qdev) +{ + if (!qdev->primary_created) { + return; + } + wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC); + qdev->primary_created = 0; +} + +void qxl_io_create_primary(struct qxl_device *qdev, unsigned width, unsigned height) +{ + struct qxl_surface_create *create; + int32_t stride = width * 4; + + QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev, + qdev->ram_header); + create = &qdev->ram_header->create_surface; + create->format = SPICE_SURFACE_FMT_32_xRGB; + create->width = width; + create->height = height; + // TODO? stride should be positive + //create->stride = stride; + //create->mem = qxl_physical_address(qdev, 0, qdev->main_mem_slot); + create->stride = -stride; + create->mem = qxl_bo_physical_address(qdev, + qdev->surface0_bo, 0, + qdev->main_mem_slot); + QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem, + qdev->surface0_bo->kptr); + + create->flags = 0; + create->type = QXL_SURF_TYPE_PRIMARY; + + qdev->primary_created = 1; + qdev->primary_width = width; + qdev->primary_height = height; + + wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC); +} + +void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id) +{ + QXL_INFO(qdev, "qxl_memslot_add %d\n", id); + wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC); +} + +void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args); + va_end(args); + // DO not do a DRM output here - this will call printk, which will + // call back into qxl for rendering (qxl_fb) + //DRM_ERROR("%s", qdev->ram_header->log_buf); + outb(0, qdev->io_base + QXL_IO_LOG); +} + +void qxl_io_reset(struct qxl_device *qdev) +{ + outb(0, qdev->io_base + QXL_IO_RESET); +} + +void qxl_io_monitors_config(struct qxl_device *qdev) +{ + wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC); +} diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c new file mode 100644 index 00000000000..f19c5acc6d3 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -0,0 +1,439 @@ +/* + * Copyright (C) 2009 Red Hat <bskeggs@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Alon Levy <alevy@redhat.com> + */ + +#include <linux/debugfs.h> + +#include "drmP.h" +#include "qxl_drv.h" +#include "qxl_object.h" + +int qxl_debug_level; +int qxl_debug_disable_fb; + +static void ppm_save(int width, int height, int bytes_per_pixel, int line_size, + int bits_per_pixel, uint8_t *d1, struct seq_file *m) +{ + uint8_t *d; + uint32_t v; + int y, x; + uint8_t r, g, b; + int ret; + char *linebuf, *pbuf; + int rshift = 16; + int gshift = 8; + int bshift = 0; + int rmax = 255; + int gmax = 255; + int bmax = 255; + + DRM_INFO("%s: hwd %d,%d,%d bpp %d line %d\n", __func__, + height, width, bytes_per_pixel, bits_per_pixel, line_size); + seq_printf(m, "P6\n%d %d\n%d\n", width, height, 255); + linebuf = kmalloc(width * 3, GFP_KERNEL); + for(y = 0; y < height; y++) { + d = d1; + pbuf = linebuf; + for(x = 0; x < width; x++) { + if (bits_per_pixel == 32) + v = *(uint32_t *)d; + else + v = (uint32_t) (*(uint16_t *)d); + r = ((v >> rshift) & rmax) * 256 / (rmax + 1); + g = ((v >> gshift) & gmax) * 256 / (gmax + 1); + b = ((v >> bshift) & bmax) * 256 / (bmax + 1); + *pbuf++ = r; + *pbuf++ = g; + *pbuf++ = b; + d += bytes_per_pixel; + } + d1 += line_size; + ret = seq_write(m, linebuf, pbuf - linebuf); + } + kfree(linebuf); +} + +static void ppm_save_qxl_fb(struct qxl_framebuffer *qxl_fb, struct seq_file *m) +{ + struct qxl_bo *qobj = gem_to_qxl_bo(qxl_fb->obj); + int width = qxl_fb->base.width; + int height = qxl_fb->base.height; + int bytes_per_pixel = qxl_fb->base.bits_per_pixel / 8; + int line_size = qxl_fb->base.pitches[0]; + int bits_per_pixel = qxl_fb->base.bits_per_pixel; + uint8_t *d1 = qobj->kptr; + + ppm_save(width, height, bytes_per_pixel, line_size, bits_per_pixel, d1, m); +} + +static int +qxl_debugfs_dumbppm(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + struct qxl_rect area; + + if (qdev->active_user_framebuffer) { + ppm_save_qxl_fb(qdev->active_user_framebuffer, m); + } else if (qdev->fbdev_qfb) { + area.top = area.left = 0; + area.right = qdev->fbdev_qfb->base.width; + area.bottom = qdev->fbdev_qfb->base.height; + qxl_io_update_area(qdev, 0, &area); + ppm_save_qxl_fb(qdev->fbdev_qfb, m); + } + return 0; +} + +struct release_idr_data { + struct seq_file *m; + int total_bo; +}; + +static int idr_iter_fn(int id, void *p, void *data) +{ + struct release_idr_data *release_data = data; + struct seq_file *m = release_data->m; + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + struct drm_qxl_release *release = qxl_release_from_id_locked(qdev, id); + + seq_printf(m, "%d, type %d bo %d\n", id, release->type, + release->bo_count); + release_data->total_bo += release->bo_count; + return 0; +} + +static int +qxl_debugfs_release(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = 
node->minor->dev->dev_private; + struct qxl_ring_header *r = &qdev->ram_header->release_ring_hdr; + struct release_idr_data idr_data; + + idr_data.m = m; + idr_data.total_bo = 0; + mutex_lock(&qdev->release_idr_mutex); + idr_for_each(&qdev->release_idr, idr_iter_fn, &idr_data); + mutex_unlock(&qdev->release_idr_mutex); + seq_printf(m, "ring %d [%d,%d] (%d,%d)\n", r->num_items, r->prod, r->cons, + r->notify_on_prod, r->notify_on_cons); + seq_printf(m, "collected %d, release bo's %d / %d ttm\n", + qxl_garbage_collect(qdev), + idr_data.total_bo, + atomic_read(&qdev->mman.bdev.glob->bo_count)); + return 0; +} + +#define DRAW_WIDTH 256 +#define DRAW_HEIGHT 96 +static uint32_t draw_data[DRAW_WIDTH * DRAW_HEIGHT]; +static int draw_line = 0; + +static void +qxl_debugfs_draw_depth(struct seq_file *m, void *data, int depth, + uint32_t *palette, int workqueue) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + struct qxl_fb_image qxl_fb_image; + struct fb_image *image = &qxl_fb_image.fb_image; + uint32_t stride = DRAW_WIDTH * depth / 32; + uint32_t *p = &draw_data[draw_line * stride]; + int i; + static int color_ind = 0; + int fg[] = {0xaaaaaa, 0xff55ff, 0xddff33}; + + draw_line = (draw_line + 1) % DRAW_HEIGHT; + + for (i = 0 ; i < stride; ++i, ++p) { + *p = ~*p; + } + qxl_fb_image.qdev = qdev; + qxl_fb_image.visual = FB_VISUAL_DIRECTCOLOR; + image->dx = 300; + image->dy = 100; + image->width = DRAW_WIDTH; + image->height = DRAW_HEIGHT; + image->depth = depth; + image->data = (char *)draw_data; + if (depth == 1) { + if (palette) { + memcpy(qxl_fb_image.pseudo_palette, palette, + sizeof(qxl_fb_image.pseudo_palette)); + image->fg_color = 1; + image->bg_color = 0; + } else { + qxl_fb_image.visual = FB_VISUAL_MONO10; + image->fg_color = fg[color_ind]; + image->bg_color = 0; + } + color_ind = (color_ind + 1) % (sizeof(fg) / sizeof(fg[0])); + } + if (workqueue) { + qxl_fb_queue_imageblit(qdev, &qxl_fb_image, NULL, NULL); + } else { + qxl_draw_opaque_fb(&qxl_fb_image, 0); + } +} + +static int +qxl_debugfs_draw_32(struct seq_file *m, void *data) +{ + qxl_debugfs_draw_depth(m, data, 32, NULL, 0); + return 0; +} + +static int +qxl_debugfs_draw_1(struct seq_file *m, void *data) +{ + qxl_debugfs_draw_depth(m, data, 1, NULL, 0); + return 0; +} + +static int +qxl_debugfs_oom(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + + qxl_io_notify_oom(qdev); + return 0; +} + +static int +qxl_debugfs_debug_enable(struct seq_file *m, void *data) +{ + qxl_debug_level = 1; + return 0; +} + +static int +qxl_debugfs_debug_disable(struct seq_file *m, void *data) +{ + qxl_debug_level = -1; + return 0; +} + +static int gem_idr_iter_fn(int id, void *p, void *data) +{ + ++*(int *)data; + return 0; +} + +static int +qxl_debugfs_mem(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + int gem_count = 0; + + seq_printf(m, "ttm bo count %d\n", + atomic_read(&qdev->mman.bdev.glob->bo_count)); + gem_count = 0; + spin_lock(&qdev->ddev->object_name_lock); + idr_for_each(&qdev->ddev->object_name_idr, gem_idr_iter_fn, &gem_count); + spin_unlock(&qdev->ddev->object_name_lock); + seq_printf(m, "gem object_name count %d\n", gem_count); + return 0; +} + +/* test new bo and free of bo */ +static int +qxl_debugfs_bo_test(struct seq_file *m, void 
*data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + struct qxl_bo *bo; + int ret; + + ret = qxl_bo_create(qdev, 8192, false /* not kernel, device */, + QXL_GEM_DOMAIN_VRAM, &bo); + if (unlikely(ret != 0)) { + seq_printf(m, "failed to create bo of 8192 bytes\n"); + return 0; + } + // TODO - pin test as well. qxl_allocnf test generally. + qxl_bo_free(bo); + return 0; +} + +/* test new idr and release of idr */ +static int +qxl_debugfs_alloc_test(struct seq_file *m, void *data) +{ + seq_printf(m, "implement me\n"); + return 0; +} + +static int +qxl_debugfs_fb_wq_image(struct seq_file *m, void *data) +{ + uint32_t palette[16] = {0, 0xffffff, 0}; + qxl_debugfs_draw_depth(m, data, 1, palette, 1); + return 0; +} + +static void +set_rect(struct qxl_rect *r, int top, int left, int bottom, int right) +{ + r->top = top; + r->left = left; + r->bottom = bottom; + r->right = right; +} + +static int +qxl_debugfs_fb_wq_fill(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + struct qxl_draw_fill qxl_draw_fill_rec; + + qxl_draw_fill_rec.qdev = qdev; + set_rect(&qxl_draw_fill_rec.rect, 100, 100, 200, 400); + qxl_draw_fill_rec.color = 0x00ff0000; + qxl_draw_fill_rec.rop = SPICE_ROPD_OP_PUT; + qxl_fb_queue_draw_fill(&qxl_draw_fill_rec); + return 0; +} + + +static int +qxl_debugfs_fb_enable(struct seq_file *m, void *data) +{ + qxl_debug_disable_fb = 0; + return 0; +} + +static int +qxl_debugfs_fb_disable(struct seq_file *m, void *data) +{ + qxl_debug_disable_fb = 1; + return 0; +} + +static int +qxl_debugfs_irq_received(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received)); + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display)); + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor)); + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd)); + seq_printf(m, "%d\n", qdev->irq_received_error); + return 0; +} + +static int +qxl_debugfs_read_client_monitors_config(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + + qxl_display_read_client_monitors_config(qdev); + return 0; +} + +static int +qxl_debugfs_set_client_monitors_config(struct seq_file *m, int width, int height) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + + qxl_alloc_client_monitors_config(qdev, 1); + if (!qdev->client_monitors_config) { + qxl_io_log(qdev, "%s: no memory\n", __func__); + return 0; + } + + qdev->client_monitors_config->count = 1; + qdev->client_monitors_config->heads[0].width = width; + qdev->client_monitors_config->heads[0].height = height; + qdev->client_monitors_config->heads[0].x = 0; + qdev->client_monitors_config->heads[0].y = 0; + qxl_crtc_set_from_client_monitors_config(qdev); + qxl_send_monitors_config(qdev); + drm_sysfs_hotplug_event(qdev->ddev); + return 0; +} + +static int +qxl_debugfs_set_monitor_924_668(struct seq_file *m, void *data) +{ + return qxl_debugfs_set_client_monitors_config(m, 924, 668); +} + +static int +qxl_debugfs_set_monitor_820_620(struct seq_file *m, void *data) +{ + return qxl_debugfs_set_client_monitors_config(m, 820, 
620); +} + +static struct drm_info_list qxl_debugfs_list[] = { + { "dumbppm", qxl_debugfs_dumbppm, 0, NULL }, + { "release", qxl_debugfs_release, 0, NULL }, + { "draw32", qxl_debugfs_draw_32, 0, NULL }, + { "draw1", qxl_debugfs_draw_1, 0, NULL }, + { "oom", qxl_debugfs_oom, 0, NULL }, + { "mem", qxl_debugfs_mem, 0, NULL }, + { "bo_test", qxl_debugfs_bo_test, 0, NULL }, + { "alloc_test", qxl_debugfs_alloc_test, 0, NULL }, + { "fb_wq_image", qxl_debugfs_fb_wq_image, 0, NULL }, + { "fb_wq_fill", qxl_debugfs_fb_wq_fill, 0, NULL }, + { "read_client_monitors_config", + qxl_debugfs_read_client_monitors_config, 0, NULL }, + { "mon_924_668", qxl_debugfs_set_monitor_924_668, 0, NULL }, + { "mon_820_620", qxl_debugfs_set_monitor_820_620, 0, NULL }, + /* TODO: read int from user (echo debug_level > /sys/kernel/debugfs/dri/0/debug */ + { "debug_enable", qxl_debugfs_debug_enable, 0, NULL }, + { "debug_disable", qxl_debugfs_debug_disable, 0, NULL }, + { "fb_enable", qxl_debugfs_fb_enable, 0, NULL }, + { "fb_disable", qxl_debugfs_fb_disable, 0, NULL }, + { "irq_received", qxl_debugfs_irq_received, 0, NULL }, +}; +#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list) + +int +qxl_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(qxl_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); + return 0; +} + +void +qxl_debugfs_takedown(struct drm_minor *minor) +{ + drm_debugfs_remove_files(qxl_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, + minor); +} diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h new file mode 100644 index 00000000000..03cf11851d4 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_dev.h @@ -0,0 +1,782 @@ +/* + Copyright (C) 2009 Red Hat, Inc. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS + IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + + +#ifndef H_QXL_DEV +#define H_QXL_DEV + +#include <linux/types.h> + +/* + * from spice-protocol + * Release 0.10.0 + */ + +/* enums.h */ + +enum SpiceImageType { + SPICE_IMAGE_TYPE_BITMAP, + SPICE_IMAGE_TYPE_QUIC, + SPICE_IMAGE_TYPE_RESERVED, + SPICE_IMAGE_TYPE_LZ_PLT = 100, + SPICE_IMAGE_TYPE_LZ_RGB, + SPICE_IMAGE_TYPE_GLZ_RGB, + SPICE_IMAGE_TYPE_FROM_CACHE, + SPICE_IMAGE_TYPE_SURFACE, + SPICE_IMAGE_TYPE_JPEG, + SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS, + SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB, + SPICE_IMAGE_TYPE_JPEG_ALPHA, + + SPICE_IMAGE_TYPE_ENUM_END +}; + +enum SpiceBitmapFmt { + SPICE_BITMAP_FMT_INVALID, + SPICE_BITMAP_FMT_1BIT_LE, + SPICE_BITMAP_FMT_1BIT_BE, + SPICE_BITMAP_FMT_4BIT_LE, + SPICE_BITMAP_FMT_4BIT_BE, + SPICE_BITMAP_FMT_8BIT, + SPICE_BITMAP_FMT_16BIT, + SPICE_BITMAP_FMT_24BIT, + SPICE_BITMAP_FMT_32BIT, + SPICE_BITMAP_FMT_RGBA, + + SPICE_BITMAP_FMT_ENUM_END +}; + +enum SpiceSurfaceFmt { + SPICE_SURFACE_FMT_INVALID, + SPICE_SURFACE_FMT_1_A, + SPICE_SURFACE_FMT_8_A = 8, + SPICE_SURFACE_FMT_16_555 = 16, + SPICE_SURFACE_FMT_32_xRGB = 32, + SPICE_SURFACE_FMT_16_565 = 80, + SPICE_SURFACE_FMT_32_ARGB = 96, + + SPICE_SURFACE_FMT_ENUM_END +}; + +typedef enum SpiceClipType { + SPICE_CLIP_TYPE_NONE, + SPICE_CLIP_TYPE_RECTS, + + SPICE_CLIP_TYPE_ENUM_END +} SpiceClipType; + +enum SpiceRopd { + SPICE_ROPD_INVERS_SRC = (1 << 0), + SPICE_ROPD_INVERS_BRUSH = (1 << 1), + SPICE_ROPD_INVERS_DEST = (1 << 2), + SPICE_ROPD_OP_PUT = (1 << 3), + SPICE_ROPD_OP_OR = (1 << 4), + SPICE_ROPD_OP_AND = (1 << 5), + SPICE_ROPD_OP_XOR = (1 << 6), + SPICE_ROPD_OP_BLACKNESS = (1 << 7), + SPICE_ROPD_OP_WHITENESS = (1 << 8), + SPICE_ROPD_OP_INVERS = (1 << 9), + SPICE_ROPD_INVERS_RES = (1 << 10), + + SPICE_ROPD_MASK = 0x7ff +}; + +enum SpiceBrushType { + SPICE_BRUSH_TYPE_NONE, + SPICE_BRUSH_TYPE_SOLID, + SPICE_BRUSH_TYPE_PATTERN, + + SPICE_BRUSH_TYPE_ENUM_END +}; + +/* qxl_dev.h */ + +#pragma pack(push,1) + +#define REDHAT_PCI_VENDOR_ID 0x1b36 + +/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */ +#define QXL_DEVICE_ID_STABLE 0x0100 + +enum { + QXL_REVISION_STABLE_V04=0x01, + QXL_REVISION_STABLE_V06=0x02, + QXL_REVISION_STABLE_V10=0x03, + QXL_REVISION_STABLE_V12=0x04, +}; + +#define QXL_DEVICE_ID_DEVEL 0x01ff +#define QXL_REVISION_DEVEL 0x01 + +#define QXL_ROM_MAGIC (*(uint32_t*)"QXRO") +#define QXL_RAM_MAGIC (*(uint32_t*)"QXRA") + +enum { + QXL_RAM_RANGE_INDEX, + QXL_VRAM_RANGE_INDEX, + QXL_ROM_RANGE_INDEX, + QXL_IO_RANGE_INDEX, + + QXL_PCI_RANGES +}; + +/* qxl-1 compat: append only */ +enum { + QXL_IO_NOTIFY_CMD, + QXL_IO_NOTIFY_CURSOR, + QXL_IO_UPDATE_AREA, + QXL_IO_UPDATE_IRQ, + QXL_IO_NOTIFY_OOM, + QXL_IO_RESET, + QXL_IO_SET_MODE, /* qxl-1 */ + QXL_IO_LOG, + /* appended for qxl-2 */ + QXL_IO_MEMSLOT_ADD, + QXL_IO_MEMSLOT_DEL, + QXL_IO_DETACH_PRIMARY, + QXL_IO_ATTACH_PRIMARY, + QXL_IO_CREATE_PRIMARY, + QXL_IO_DESTROY_PRIMARY, + QXL_IO_DESTROY_SURFACE_WAIT, + QXL_IO_DESTROY_ALL_SURFACES, + /* appended for qxl-3 */ + QXL_IO_UPDATE_AREA_ASYNC, + QXL_IO_MEMSLOT_ADD_ASYNC, + QXL_IO_CREATE_PRIMARY_ASYNC, + QXL_IO_DESTROY_PRIMARY_ASYNC, + QXL_IO_DESTROY_SURFACE_ASYNC, + QXL_IO_DESTROY_ALL_SURFACES_ASYNC, + QXL_IO_FLUSH_SURFACES_ASYNC, + QXL_IO_FLUSH_RELEASE, + /* appended for qxl-4 */ + QXL_IO_MONITORS_CONFIG_ASYNC, + /* appended for qxl-5 */ + QXL_IO_CLIENT_MONITORS_CONFIG_NUM, + QXL_IO_CLIENT_MONITORS_CONFIG_GET, + QXL_IO_CLIENT_MONITORS_CONFIG_DONE, + QXL_IO_FEATURES_SET, + + QXL_IO_RANGE_SIZE +}; + +typedef uint64_t QXLPHYSICAL; +typedef int32_t QXLFIXED; //fixed 28.4 + +struct qxl_point_fix { + 
QXLFIXED x; + QXLFIXED y; +}; + +struct qxl_point { + int32_t x; + int32_t y; +}; + +struct qxl_point_1_6 { + int16_t x; + int16_t y; +}; + +struct qxl_rect { + int32_t top; + int32_t left; + int32_t bottom; + int32_t right; +}; + +/* qxl-1 compat: append only */ +struct qxl_rom { + uint32_t magic; + uint32_t id; + uint32_t update_id; + uint32_t compression_level; + uint32_t log_level; + uint32_t mode; /* qxl-1 */ + uint32_t modes_offset; + uint32_t num_io_pages; + uint32_t pages_offset; /* qxl-1 */ + uint32_t draw_area_offset; /* qxl-1 */ + uint32_t surface0_area_size; /* qxl-1 name: draw_area_size */ + uint32_t ram_header_offset; + uint32_t mm_clock; + /* appended for qxl-2 */ + uint32_t n_surfaces; + uint64_t flags; + uint8_t slots_start; + uint8_t slots_end; + uint8_t slot_gen_bits; + uint8_t slot_id_bits; + uint8_t slot_generation; + uint8_t padding[3]; /* Padding to 32bit align */ +}; + +/* qxl-1 compat: fixed */ +struct qxl_mode { + uint32_t id; + uint32_t x_res; + uint32_t y_res; + uint32_t bits; + uint32_t stride; + uint32_t x_mili; + uint32_t y_mili; + uint32_t orientation; +}; + +/* qxl-1 compat: fixed */ +struct qxl_modes { + uint32_t n_modes; + struct qxl_mode modes[0]; +}; + +/* qxl-1 compat: append only */ +enum qxl_cmd_type { + QXL_CMD_NOP, + QXL_CMD_DRAW, + QXL_CMD_UPDATE, + QXL_CMD_CURSOR, + QXL_CMD_MESSAGE, + QXL_CMD_SURFACE, +}; + +/* qxl-1 compat: fixed */ +struct qxl_command { + QXLPHYSICAL data; + uint32_t type; + uint32_t padding; +}; + +#define QXL_COMMAND_FLAG_COMPAT (1<<0) +#define QXL_COMMAND_FLAG_COMPAT_16BPP (2<<0) + +struct qxl_command_ext { + struct qxl_command cmd; + uint32_t group_id; + uint32_t flags; +}; + +struct qxl_mem_slot { + uint64_t mem_start; + uint64_t mem_end; +}; + +#define QXL_SURF_TYPE_PRIMARY 0 + +#define QXL_SURF_FLAG_KEEP_DATA (1 << 0) + +struct qxl_surface_create { + uint32_t width; + uint32_t height; + int32_t stride; + uint32_t format; + uint32_t position; + uint32_t mouse_mode; + uint32_t flags; + uint32_t type; + QXLPHYSICAL mem; +}; + +#define QXL_COMMAND_RING_SIZE 32 +#define QXL_CURSOR_RING_SIZE 32 +#define QXL_RELEASE_RING_SIZE 8 + +#define QXL_LOG_BUF_SIZE 4096 + +#define QXL_INTERRUPT_DISPLAY (1 << 0) +#define QXL_INTERRUPT_CURSOR (1 << 1) +#define QXL_INTERRUPT_IO_CMD (1 << 2) +#define QXL_INTERRUPT_ERROR (1 << 3) +#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG (1 << 4) + +#define QXL_GUEST_FEATURE_CLIENT_MONITORS_CONFIG_INTERRUPT (1 << 0) + +struct qxl_ring_header { + uint32_t num_items; + uint32_t prod; + uint32_t notify_on_prod; + uint32_t cons; + uint32_t notify_on_cons; +}; + +/* qxl-1 compat: append only */ +struct qxl_ram_header { + uint32_t magic; + uint32_t int_pending; + uint32_t int_mask; + uint8_t log_buf[QXL_LOG_BUF_SIZE]; + struct qxl_ring_header cmd_ring_hdr; + struct qxl_command cmd_ring[QXL_COMMAND_RING_SIZE]; + struct qxl_ring_header cursor_ring_hdr; + struct qxl_command cursor_ring[QXL_CURSOR_RING_SIZE]; + struct qxl_ring_header release_ring_hdr; + uint64_t release_ring[QXL_RELEASE_RING_SIZE]; + struct qxl_rect update_area; + /* appended for qxl-2 */ + uint32_t update_surface; + struct qxl_mem_slot mem_slot; + struct qxl_surface_create create_surface; + uint64_t flags; + + /* appended for qxl-4 */ + + /* used by QXL_IO_MONITORS_CONFIG_ASYNC */ + QXLPHYSICAL monitors_config; + uint64_t guest_features; +}; + +union qxl_release_info { + uint64_t id; // in + uint64_t next; // out +}; + +struct qxl_release_info_ext { + union qxl_release_info *info; + uint32_t group_id; +}; + +struct qxl_data_chunk { + uint32_t 
data_size; + QXLPHYSICAL prev_chunk; + QXLPHYSICAL next_chunk; + uint8_t data[0]; +}; + +struct qxl_message { + union qxl_release_info release_info; + uint8_t data[0]; +}; + +struct qxl_compat_update_cmd { + union qxl_release_info release_info; + struct qxl_rect area; + uint32_t update_id; +}; + +struct qxl_update_cmd { + union qxl_release_info release_info; + struct qxl_rect area; + uint32_t update_id; + uint32_t surface_id; +}; + +struct qxl_cursor_header { + uint64_t unique; + uint16_t type; + uint16_t width; + uint16_t height; + uint16_t hot_spot_x; + uint16_t hot_spot_y; +}; + +struct qxl_cursor { + struct qxl_cursor_header header; + uint32_t data_size; + struct qxl_data_chunk chunk; +}; + +enum { + QXL_CURSOR_SET, + QXL_CURSOR_MOVE, + QXL_CURSOR_HIDE, + QXL_CURSOR_TRAIL, +}; + +#define QXL_CURSUR_DEVICE_DATA_SIZE 128 + +struct qxl_cursor_cmd { + union qxl_release_info release_info; + uint8_t type; + union { + struct { + struct qxl_point_1_6 position; + uint8_t visible; + QXLPHYSICAL shape; + } set; + struct { + uint16_t length; + uint16_t frequency; + } trail; + struct qxl_point_1_6 position; + } u; + uint8_t device_data[QXL_CURSUR_DEVICE_DATA_SIZE]; //todo: dynamic size from rom +}; + +enum { + QXL_DRAW_NOP, + QXL_DRAW_FILL, + QXL_DRAW_OPAQUE, + QXL_DRAW_COPY, + QXL_COPY_BITS, + QXL_DRAW_BLEND, + QXL_DRAW_BLACKNESS, + QXL_DRAW_WHITENESS, + QXL_DRAW_INVERS, + QXL_DRAW_ROP3, + QXL_DRAW_STROKE, + QXL_DRAW_TEXT, + QXL_DRAW_TRANSPARENT, + QXL_DRAW_ALPHA_BLEND, +}; + +struct qxl_raster_glyph { + struct qxl_point render_pos; + struct qxl_point glyph_origin; + uint16_t width; + uint16_t height; + uint8_t data[0]; +}; + +struct qxl_string { + uint32_t data_size; + uint16_t length; + uint16_t flags; + struct qxl_data_chunk chunk; +}; + +struct qxl_copy_bits { + struct qxl_point src_pos; +}; + +enum qxl_effect_type +{ + QXL_EFFECT_BLEND = 0, + QXL_EFFECT_OPAQUE = 1, + QXL_EFFECT_REVERT_ON_DUP = 2, + QXL_EFFECT_BLACKNESS_ON_DUP = 3, + QXL_EFFECT_WHITENESS_ON_DUP = 4, + QXL_EFFECT_NOP_ON_DUP = 5, + QXL_EFFECT_NOP = 6, + QXL_EFFECT_OPAQUE_BRUSH = 7 +}; + +struct qxl_pattern { + QXLPHYSICAL pat; + struct qxl_point pos; +}; + +struct qxl_brush { + uint32_t type; + union { + uint32_t color; + struct qxl_pattern pattern; + } u; +}; + +struct qxl_q_mask { + uint8_t flags; + struct qxl_point pos; + QXLPHYSICAL bitmap; +}; + +struct qxl_fill { + struct qxl_brush brush; + uint16_t rop_descriptor; + struct qxl_q_mask mask; +}; + +struct qxl_opaque { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + struct qxl_brush brush; + uint16_t rop_descriptor; + uint8_t scale_mode; + struct qxl_q_mask mask; +}; + +struct qxl_copy { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + uint16_t rop_descriptor; + uint8_t scale_mode; + struct qxl_q_mask mask; +}; + +struct qxl_transparent { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + uint32_t src_color; + uint32_t true_color; +}; + +struct qxl_alpha_blend { + uint16_t alpha_flags; + uint8_t alpha; + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; +}; + +struct qxl_compat_alpha_blend { + uint8_t alpha; + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; +}; + +struct qxl_rop_3 { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + struct qxl_brush brush; + uint8_t rop3; + uint8_t scale_mode; + struct qxl_q_mask mask; +}; + +struct qxl_line_attr { + uint8_t flags; + uint8_t join_style; + uint8_t end_style; + uint8_t style_nseg; + QXLFIXED width; + QXLFIXED miter_limit; + QXLPHYSICAL style; +}; + +struct qxl_stroke { + QXLPHYSICAL path; + 
struct qxl_line_attr attr; + struct qxl_brush brush; + uint16_t fore_mode; + uint16_t back_mode; +}; + +struct qxl_text { + QXLPHYSICAL str; + struct qxl_rect back_area; + struct qxl_brush fore_brush; + struct qxl_brush back_brush; + uint16_t fore_mode; + uint16_t back_mode; +}; + +struct qxl_mask { + struct qxl_q_mask mask; +}; + +struct qxl_clip { + uint32_t type; + QXLPHYSICAL data; +}; + +struct qxl_compat_drawable { + union qxl_release_info release_info; + uint8_t effect; + uint8_t type; + uint16_t bitmap_offset; + struct qxl_rect bitmap_area; + struct qxl_rect bbox; + struct qxl_clip clip; + uint32_t mm_time; + union { + struct qxl_fill fill; + struct qxl_opaque opaque; + struct qxl_copy copy; + struct qxl_transparent transparent; + struct qxl_compat_alpha_blend alpha_blend; + struct qxl_copy_bits copy_bits; + struct qxl_copy blend; + struct qxl_rop_3 rop3; + struct qxl_stroke stroke; + struct qxl_text text; + struct qxl_mask blackness; + struct qxl_mask invers; + struct qxl_mask whiteness; + } u; +}; + +struct qxl_drawable { + union qxl_release_info release_info; + uint32_t surface_id; + uint8_t effect; + uint8_t type; + uint8_t self_bitmap; + struct qxl_rect self_bitmap_area; + struct qxl_rect bbox; + struct qxl_clip clip; + uint32_t mm_time; + int32_t surfaces_dest[3]; + struct qxl_rect surfaces_rects[3]; + union { + struct qxl_fill fill; + struct qxl_opaque opaque; + struct qxl_copy copy; + struct qxl_transparent transparent; + struct qxl_alpha_blend alpha_blend; + struct qxl_copy_bits copy_bits; + struct qxl_copy blend; + struct qxl_rop_3 rop3; + struct qxl_stroke stroke; + struct qxl_text text; + struct qxl_mask blackness; + struct qxl_mask invers; + struct qxl_mask whiteness; + } u; +}; + +enum qxl_surface_cmd_type { + QXL_SURFACE_CMD_CREATE, + QXL_SURFACE_CMD_DESTROY, +}; + +struct qxl_surface { + uint32_t format; + uint32_t width; + uint32_t height; + int32_t stride; + QXLPHYSICAL data; +}; + +struct qxl_surface_cmd { + union qxl_release_info release_info; + uint32_t surface_id; + uint8_t type; + uint32_t flags; + union { + struct qxl_surface surface_create; + } u; +}; + +struct qxl_clip_rects { + uint32_t num_rects; + struct qxl_data_chunk chunk; +}; + +enum { + QXL_PATH_BEGIN = (1 << 0), + QXL_PATH_END = (1 << 1), + QXL_PATH_CLOSE = (1 << 3), + QXL_PATH_BEZIER = (1 << 4), +}; + +struct qxl_path_seg { + uint32_t flags; + uint32_t count; + struct qxl_point_fix points[0]; +}; + +struct qxl_path { + uint32_t data_size; + struct qxl_data_chunk chunk; +}; + +enum { + QXL_IMAGE_GROUP_DRIVER, + QXL_IMAGE_GROUP_DEVICE, + QXL_IMAGE_GROUP_RED, + QXL_IMAGE_GROUP_DRIVER_DONT_CACHE, +}; + +struct qxl_image_id { + uint32_t group; + uint32_t unique; +}; + +union qxl_image_id_union { + struct qxl_image_id id; + uint64_t value; +}; + +enum qxl_image_flags { + QXL_IMAGE_CACHE = (1 << 0), + QXL_IMAGE_HIGH_BITS_SET = (1 << 1), +}; + +enum qxl_bitmap_flags { + QXL_BITMAP_DIRECT = (1 << 0), + QXL_BITMAP_UNSTABLE = (1 << 1), + QXL_BITMAP_TOP_DOWN = (1 << 2), // == SPICE_BITMAP_FLAGS_TOP_DOWN +}; + +#define QXL_SET_IMAGE_ID(image, _group, _unique) { \ + (image)->descriptor.id = (((uint64_t)_unique) << 32) | _group; \ +} + +struct qxl_image_descriptor { + uint64_t id; + uint8_t type; + uint8_t flags; + uint32_t width; + uint32_t height; +}; + +struct qxl_palette { + uint64_t unique; + uint16_t num_ents; + uint32_t ents[0]; +}; + +struct qxl_bitmap { + uint8_t format; + uint8_t flags; + uint32_t x; + uint32_t y; + uint32_t stride; + QXLPHYSICAL palette; + QXLPHYSICAL data; //data[0] ? 
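+	/* presumably a pointer to a qxl_data_chunk, or to the raw pixel data
+	 * itself when QXL_BITMAP_DIRECT is set; the spice-protocol headers are
+	 * the reference for this field */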
+}; + +struct qxl_surface_id { + uint32_t surface_id; +}; + +struct qxl_encoder_data { + uint32_t data_size; + uint8_t data[0]; +}; + +struct qxl_image { + struct qxl_image_descriptor descriptor; + union { // variable length + struct qxl_bitmap bitmap; + struct qxl_encoder_data quic; + struct qxl_surface_id surface_image; + } u; +}; + +/* A QXLHead is a single monitor output backed by a QXLSurface. + * x and y offsets are unsigned since they are used in relation to + * the given surface, not the same as the x, y coordinates in the guest + * screen reference frame. */ +struct qxl_head { + uint32_t id; + uint32_t surface_id; + uint32_t width; + uint32_t height; + uint32_t x; + uint32_t y; + uint32_t flags; +}; + +struct qxl_monitors_config { + uint16_t count; + uint16_t max_allowed; /* If it is 0 no fixed limit is given by the driver */ + struct qxl_head heads[0]; +}; + +#pragma pack(pop) + +#endif /* _H_QXL_DEV */ diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c new file mode 100644 index 00000000000..5374e857be0 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -0,0 +1,744 @@ +#include "qxl_drv.h" +#include "drm_crtc_helper.h" + +static void qxl_crtc_set_to_mode(struct qxl_device *qdev, + struct drm_connector *connector, + struct qxl_head *head) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *t; + int width = head->width; + int height = head->height; + + DRM_ERROR("TODO: check against framebuffer size\n"); + if (width < 320 || height < 240) { + qxl_io_log(qdev, "%s: bad head: %dx%d", width, height); + width = 1024; + height = 768; + } + if (width * height * 4 > 16*1024*1024) { + width = 1024; + height = 768; + } + // TODO: go over regular modes and removed preferred? + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) + drm_mode_remove(connector, mode); + mode = drm_cvt_mode(dev, width, height, 60, false, false, false); + mode->type |= DRM_MODE_TYPE_PREFERRED; + mode->status = MODE_OK; + drm_mode_probed_add(connector, mode); + qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height); +} + +void qxl_crtc_set_from_client_monitors_config(struct qxl_device *qdev) +{ + struct drm_connector *connector; + int i; + struct drm_device *dev = qdev->ddev; + + i = 0; + qxl_io_log(qdev, "%s: %d, %d\n", __func__, + dev->mode_config.num_connector, + qdev->monitors_config->count); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (i > qdev->client_monitors_config->count) { + /* crtc will be reported as disabled */ + continue; + } + qxl_crtc_set_to_mode(qdev, connector, + &qdev->client_monitors_config->heads[i]); + ++i; + } +} + +void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count) +{ + if (qdev->client_monitors_config && + count > qdev->client_monitors_config->count) { + kfree(qdev->client_monitors_config); + } + if (!qdev->client_monitors_config) { + qdev->client_monitors_config = kzalloc( + sizeof(struct qxl_monitors_config) + + sizeof(struct qxl_head) * count, GFP_KERNEL); + if (!qdev->client_monitors_config) { + qxl_io_log(qdev, + "%s: allocation failure for %u heads\n", + __func__, count); + return; + } + } + qdev->client_monitors_config->count = count; +} + +static void qxl_display_copy_client_monitors_config(struct qxl_device *qdev) +{ + int i; + + BUG_ON(!qdev->monitors_config); + qxl_alloc_client_monitors_config(qdev, qdev->monitors_config->count); + /* we copy max from the client but it isn't used */ + qdev->client_monitors_config->max_allowed = 
qdev->monitors_config->max_allowed; + for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) { + qdev->client_monitors_config->heads[i] = qdev->monitors_config->heads[i]; + } +} + +void qxl_display_read_client_monitors_config(struct qxl_device *qdev) +{ + int num_monitors; + + num_monitors = inl(qdev->io_base + QXL_IO_CLIENT_MONITORS_CONFIG_NUM); + if (num_monitors > qdev->monitors_config->max_allowed) { + DRM_INFO("client monitors list will be truncated: %d < %d\n", + qdev->monitors_config->max_allowed, num_monitors); + } + qxl_io_log(qdev, "client_monitors_config_num = %d\n", num_monitors); + outb(0, qdev->io_base + QXL_IO_CLIENT_MONITORS_CONFIG_GET); + qxl_display_copy_client_monitors_config(qdev); + qxl_crtc_set_from_client_monitors_config(qdev); + + /* fire off a uevent and let userspace tell us what to do */ + qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n"); + drm_sysfs_hotplug_event(qdev->ddev); + qxl_io_log(qdev, "calling QXL_IO_CLIENT_MONITORS_CONFIG_DONE\n"); + outb(0, qdev->io_base + QXL_IO_CLIENT_MONITORS_CONFIG_DONE); +} + +static int qxl_add_monitors_config_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct qxl_device *qdev = dev->dev_private; + struct qxl_output *output = drm_connector_to_qxl_output(connector); + int h = output->index; + struct drm_display_mode *mode = NULL; + struct qxl_head *head; + + if (!qdev->monitors_config) { + return 0; + } + head = &qdev->monitors_config->heads[h]; + + mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, false); + mode->type |= DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + return 1; +} + +static int qxl_add_common_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode = NULL; + int i; + struct mode_size { + int w; + int h; + } common_modes[] = { + { 640, 480}, + { 720, 480}, + { 800, 600}, + { 848, 480}, + {1024, 768}, + {1152, 768}, + {1280, 720}, + {1280, 800}, + {1280, 854}, + {1280, 960}, + {1280, 1024}, + {1440, 900}, + {1400, 1050}, + {1680, 1050}, + {1600, 1200}, + {1920, 1080}, + {1920, 1200} + }; + + for (i = 0; i < ARRAY_SIZE(common_modes); i++) { + if (common_modes[i].w < 320 || common_modes[i].h < 200) + continue; + + mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); + if (common_modes[i].w == 1024 && common_modes[i].h == 768) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + } + drm_mode_probed_add(connector, mode); + } + return i - 1; +} + +static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t start, uint32_t size) +{ +} + +static void qxl_crtc_destroy(struct drm_crtc *crtc) +{ + struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(qxl_crtc); +} + +static const struct drm_crtc_funcs qxl_crtc_funcs = { +// .cursor_set = qxl_crtc_cursor_set, +// .cursor_move = qxl_crtc_cursor_move, + .gamma_set = qxl_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = qxl_crtc_destroy, +}; + +static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); + + if (qxl_fb->obj) { + drm_gem_object_unreference_unlocked(qxl_fb->obj); + } + drm_framebuffer_cleanup(fb); + kfree(qxl_fb); +} + +int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips) +{ + /* TODO: vmwgfx where this 
was cribbed from had locking. Why? */ + struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); + struct qxl_device *qdev = qxl_fb->base.dev->dev_private; + struct drm_clip_rect norect; + struct qxl_bo *qobj; + int inc = 1; + + qobj = gem_to_qxl_bo(qxl_fb->obj); + if (qxl_fb != qdev->active_user_framebuffer) { + DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n", + __func__, qxl_fb, qdev->active_user_framebuffer); + } + if (!num_clips) { + num_clips = 1; + clips = &norect; + norect.x1 = norect.y1 = 0; + norect.x2 = fb->width; + norect.y2 = fb->height; + } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { + num_clips /= 2; + inc = 2; /* skip source rects */ + } + + qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, + clips, num_clips, inc); + return 0; +} + +static const struct drm_framebuffer_funcs qxl_fb_funcs = { + .destroy = qxl_user_framebuffer_destroy, + .dirty = qxl_framebuffer_surface_dirty, +// .create_handle = qxl_user_framebuffer_create_handle, +}; + +void +qxl_framebuffer_init(struct drm_device *dev, + struct qxl_framebuffer *qfb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + qfb->obj = obj; + drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs); + drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd); +} + +static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct qxl_device *qdev = dev->dev_private; + + qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n", + __func__, + mode->hdisplay, mode->vdisplay, + adjusted_mode->hdisplay, + adjusted_mode->vdisplay); + return true; +} + +void +qxl_send_monitors_config(struct qxl_device *qdev) +{ + int i; + + BUG_ON(!qdev->ram_header->monitors_config); + + if (qdev->monitors_config->count == 0) { + qxl_io_log(qdev, "%s: 0 monitors??\n", __func__); + return; + } + for (i = 0 ; i < qdev->monitors_config->count ; ++i) { + struct qxl_head *head = &qdev->monitors_config->heads[i]; + + if (head->y > 8192 || head->y < head->x || + head->width > 8192 || head->height > 8192) { + DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", + i, head->width, head->height, + head->x, head->y); + return; + } + } + qxl_io_monitors_config(qdev); +} + +static void qxl_monitors_config_set_single(struct qxl_device *qdev, + unsigned x, unsigned y, + unsigned width, unsigned height) +{ + DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y); + qdev->monitors_config->count = 1; + qdev->monitors_config->heads[0].x = x; + qdev->monitors_config->heads[0].y = y; + qdev->monitors_config->heads[0].width = width; + qdev->monitors_config->heads[0].height = height; +} + +static int qxl_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct qxl_device *qdev = dev->dev_private; + struct qxl_mode *m = (void *)mode->private; + + if (!m) { + // and do we care? 
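+		/* not fatal: the primary surface below is sized from the mode's
+		 * hdisplay/vdisplay (plus x/y), so a non-native mode is still
+		 * programmed; this branch only affects the debug output */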
+ DRM_DEBUG("%dx%d: not a native mode\n", x, y); + } else { + DRM_DEBUG("%dx%d: qxl id %d\n", + mode->hdisplay, mode->vdisplay, m->id); + } + DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n", + x, y, + mode->hdisplay, mode->vdisplay, + adjusted_mode->hdisplay, + adjusted_mode->vdisplay); + + if (!qdev->primary_created) { + qxl_io_create_primary(qdev, mode->hdisplay + x, mode->vdisplay + y); + } else { + int width = x + mode->hdisplay; + int height = y + mode->vdisplay; + if (width > qdev->primary_width || height ) { + if (width < qdev->primary_width) { + width = qdev->primary_width; + } + if (height < qdev->primary_height) { + height = qdev->primary_height; + } + qxl_io_destroy_primary(qdev); + qxl_io_create_primary(qdev, width, height); + } + } + if (qdev->monitors_config->count == 0) { + qxl_monitors_config_set_single(qdev, x, y, + mode->hdisplay, + mode->vdisplay); + } + qdev->mode_set = true; + return 0; +} + +static int +qxl_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + DRM_DEBUG("\n"); + return 0; +} + +static void qxl_crtc_prepare (struct drm_crtc *crtc) +{ + DRM_DEBUG("current: %dx%d+%d+%d (%d). \n", + crtc->mode.hdisplay, crtc->mode.vdisplay, + crtc->x, crtc->y, crtc->enabled); +} + +static void qxl_crtc_commit (struct drm_crtc *crtc) +{ + DRM_DEBUG("\n"); +} + +void qxl_crtc_load_lut(struct drm_crtc *crtc) +{ + DRM_DEBUG("\n"); +} + +static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { + .dpms = qxl_crtc_dpms, + .mode_fixup = qxl_crtc_mode_fixup, + .mode_set = qxl_crtc_mode_set, + .mode_set_base = qxl_crtc_set_base, + .prepare = qxl_crtc_prepare, + .commit = qxl_crtc_commit, + .load_lut = qxl_crtc_load_lut, +}; + +int qdev_crtc_init(struct drm_device *dev, int num_crtc) +{ + struct qxl_crtc *qxl_crtc; + + qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL); + if (!qxl_crtc) + return -ENOMEM; + + drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256); + drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); + return 0; +} + +static void qxl_enc_dpms(struct drm_encoder* encoder, int mode) +{ + DRM_DEBUG("\n"); +} + +static bool qxl_enc_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + DRM_DEBUG("\n"); + return true; +} + +static void qxl_enc_prepare(struct drm_encoder *encoder) +{ + DRM_DEBUG("\n"); +} + +static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev, + struct drm_encoder *encoder) +{ + int i; + struct qxl_head *head; + struct drm_display_mode *mode; + + BUG_ON(!encoder); + // TODO: ugly, do better + for (i = 0 ; (encoder->possible_crtcs != (1 << i)) && i < 32; ++i); + if (encoder->possible_crtcs != (1 << i)) { + DRM_ERROR("encoder has wrong possible_crtcs: %x\n", + encoder->possible_crtcs); + return; + } + if (!qdev->monitors_config || + qdev->monitors_config->max_allowed <= i) { + DRM_ERROR( + "head number too large or missing monitors config: %p, %d", + qdev->monitors_config, + qdev->monitors_config ? 
+ qdev->monitors_config->max_allowed : -1); + return; + } + if (!encoder->crtc) { + DRM_ERROR("missing crtc on encoder %p\n", encoder); + return; + } + if (i != 0) { + DRM_DEBUG("missing for multiple monitors: no head holes\n"); + } + head = &qdev->monitors_config->heads[i]; + head->id = i; + head->surface_id = 0; + if (encoder->crtc->enabled) { + mode = &encoder->crtc->mode; + head->width = mode->hdisplay; + head->height = mode->vdisplay; + head->x = encoder->crtc->x; + head->y = encoder->crtc->y; + if (qdev->monitors_config->count < i + 1) + qdev->monitors_config->count = i + 1; + } else { + head->width = 0; + head->height = 0; + head->x = 0; + head->y = 0; + } + DRM_DEBUG("setting head %d to +%d+%d %dx%d\n", + i, head->x, head->y, head->width, head->height); + head->flags = 0; + // TODO - somewhere else to call this for multiple monitors (config_commit?) + qxl_send_monitors_config(qdev); +} + +static void qxl_enc_commit(struct drm_encoder *encoder) +{ + struct qxl_device *qdev = encoder->dev->dev_private; + + qxl_write_monitors_config_for_encoder(qdev, encoder); + DRM_DEBUG("\n"); +} + +static void qxl_enc_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + DRM_DEBUG("\n"); +} + +#if 0 +static struct qxl_mode * +qxl_find_native_mode(struct qxl_device *qdev, struct drm_display_mode *mode, + int bpp) +{ + int i; + + for (i = 0; i < qdev->mode_info.num_modes; i++) { + struct qxl_mode *m = qdev->mode_info.modes + i; + if (m->x_res == mode->hdisplay && + m->y_res == mode->vdisplay && + m->bits == bpp) + return m; + } + return NULL; +} +#endif + +static int qxl_conn_get_modes(struct drm_connector *connector) +{ + int ret; + struct qxl_device *qdev = connector->dev->dev_private; + + DRM_INFO("monitors_config=%p\n", qdev->monitors_config); + // TODO: what should we do here? only show the configured modes for the device, + // or allow the full list, or both? + if (qdev->monitors_config && qdev->monitors_config->count) { + ret = qxl_add_monitors_config_modes(connector); + } else { + ret = qxl_add_common_modes(connector); + } + return ret; +} + +static int qxl_conn_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* TODO: is this called for user defined modes? 
(xrandr --add-mode) + * TODO: check that the mode fits in the framebuffer */ + DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay, mode->vdisplay, mode->status); + return MODE_OK; +} + +struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) +{ + struct qxl_output *qxl_output = + drm_connector_to_qxl_output(connector); + + DRM_DEBUG("\n"); + return &qxl_output->enc; +} + + +static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = { + .dpms = qxl_enc_dpms, + .mode_fixup = qxl_enc_mode_fixup, + .prepare = qxl_enc_prepare, + .mode_set = qxl_enc_mode_set, + .commit = qxl_enc_commit, +}; + +static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = { + .get_modes = qxl_conn_get_modes, + .mode_valid = qxl_conn_mode_valid, + .best_encoder = qxl_best_encoder, +}; + +static void qxl_conn_save(struct drm_connector *connector) +{ + DRM_DEBUG("\n"); +} + +static void qxl_conn_restore(struct drm_connector *connector) +{ + DRM_DEBUG("\n"); +} + +static enum drm_connector_status qxl_conn_detect(struct drm_connector *connector, + bool force) +{ + struct qxl_output *output = + drm_connector_to_qxl_output(connector); + struct drm_device *ddev = connector->dev; + struct qxl_device *qdev = ddev->dev_private; + int connected; + + // first monitor is always connected + connected = (output->index == 0) || + (qdev->monitors_config && + qdev->monitors_config->count > output->index); + + DRM_DEBUG("\n"); + return connected ? connector_status_connected : connector_status_disconnected; +} + +static int qxl_conn_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t value) +{ + DRM_DEBUG("\n"); + return 0; +} + +static void qxl_conn_destroy(struct drm_connector *connector) +{ + struct qxl_output *qxl_output = + drm_connector_to_qxl_output(connector); + + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(qxl_output); +} + +static const struct drm_connector_funcs qxl_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .save = qxl_conn_save, + .restore = qxl_conn_restore, + .detect = qxl_conn_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = qxl_conn_set_property, + .destroy = qxl_conn_destroy, +}; + +static void qxl_enc_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs qxl_enc_funcs = { + .destroy = qxl_enc_destroy, +}; + +int qdev_output_init(struct drm_device *dev, int num_output) +{ + struct qxl_output *qxl_output; + struct drm_connector *connector; + struct drm_encoder *encoder; + + qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL); + if (!qxl_output) + return -ENOMEM; + + qxl_output->index = num_output; + + connector = &qxl_output->base; + encoder = &qxl_output->enc; + drm_connector_init(dev, &qxl_output->base, + &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); + + drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs, + DRM_MODE_ENCODER_VIRTUAL); + + encoder->possible_crtcs = 1 << num_output; + drm_mode_connector_attach_encoder(&qxl_output->base, + &qxl_output->enc); + drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs); + drm_connector_helper_add(connector, &qxl_connector_helper_funcs); + + drm_sysfs_connector_add(connector); + return 0; +} + +static struct drm_framebuffer * +qxl_user_framebuffer_create(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_gem_object *obj; + struct qxl_framebuffer *qxl_fb; + struct 
qxl_device *qdev = dev->dev_private; + + obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); + + DRM_INFO("%s: ENTRY\n", __func__); + + qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL); + if (qxl_fb == NULL) { + return NULL; + } + + qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj); + if (qdev->active_user_framebuffer) { + DRM_INFO("%s: active_user_framebuffer %p -> %p\n", + __func__, + qdev->active_user_framebuffer, qxl_fb); + } + qdev->active_user_framebuffer = qxl_fb; + + return &qxl_fb->base; +} + +static const struct drm_mode_config_funcs qxl_mode_funcs = { + .fb_create = qxl_user_framebuffer_create, +}; + +int qxl_modeset_init(struct qxl_device *qdev) +{ + int i; + int ret; + struct drm_gem_object *gobj; + int max_allowed = QXL_NUM_OUTPUTS; + int monitors_config_size = sizeof(struct qxl_monitors_config) + + max_allowed * sizeof(struct qxl_head); + + drm_mode_config_init(qdev->ddev); + ret = qxl_gem_object_create(qdev, monitors_config_size, 0, + QXL_GEM_DOMAIN_VRAM, + false, false, &gobj); + if (ret) { + DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); + return -ENOMEM; + } + qdev->monitors_config_bo = gem_to_qxl_bo(gobj); + qxl_bo_kmap(qdev->monitors_config_bo, NULL); + qdev->monitors_config = qdev->monitors_config_bo->kptr; + qdev->ram_header->monitors_config = + qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0, + qdev->main_mem_slot); + memset(qdev->monitors_config, 0, monitors_config_size); + qdev->monitors_config->max_allowed = max_allowed; + + qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs; + + /* modes will be validated against the framebuffer size */ + qdev->ddev->mode_config.max_width = 8192; + qdev->ddev->mode_config.max_height = 8192; + + qdev->ddev->mode_config.fb_base = qdev->vram_base; + for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) { + qdev_crtc_init(qdev->ddev, i); + qdev_output_init(qdev->ddev, i); + } + + qdev->mode_info.mode_config_initialized = true; + + /* primary surface must be created by this point, to allow + * issuing command queue commands and having them read by + * spice server. */ + qxl_fbdev_init(qdev); + return 0; +} + +void qxl_modeset_fini(struct qxl_device *qdev) +{ + qxl_fbdev_fini(qdev); + if (qdev->mode_info.mode_config_initialized) { + drm_mode_config_cleanup(qdev->ddev); + qdev->mode_info.mode_config_initialized = false; + } + +} diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c new file mode 100644 index 00000000000..3971e94e8a2 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -0,0 +1,468 @@ +/* + * Copyright 2011 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "qxl_drv.h" +#include "qxl_object.h" + +/* returns a pointer to the already allocated qxl_rect array inside + * the qxl_clip_rects. This is *not* the same as the memory allocated + * on the device, it is offset to qxl_clip_rects.chunk.data */ +static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, + struct qxl_drawable *drawable, + unsigned num_clips, + struct drm_qxl_release *release) +{ + struct qxl_clip_rects *dev_clips; + + dev_clips = qxl_allocnf(qdev, sizeof(*dev_clips) + + sizeof(struct qxl_rect) * num_clips, release); + dev_clips->num_rects = num_clips; + dev_clips->chunk.next_chunk = 0; + dev_clips->chunk.prev_chunk = 0; + dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips; + drawable->clip.type = SPICE_CLIP_TYPE_RECTS; + drawable->clip.data = qxl_fb_physical_address(qdev, + release->bos[release->bo_count - 1]); + return (struct qxl_rect *)dev_clips->chunk.data; +} + +uint64_t +qxl_release_alloc(struct qxl_device *qdev, int type, + struct drm_qxl_release **ret) +{ + struct drm_qxl_release *release; + int handle = 0; + size_t size = sizeof(*release); + int idr_ret; + + release = kmalloc(size, GFP_KERNEL); + if (!release) { + DRM_ERROR("Out of memory\n"); + return 0; + } + memset(release, 0, size); + release->type = type; + release->bo_count = 0; +again: + mutex_lock(&qdev->release_idr_mutex); + if (idr_pre_get(&qdev->release_idr, GFP_KERNEL) == 0) { + DRM_ERROR("Out of memory for release idr\n"); + kfree(release); + goto release_fail; + } + idr_ret = idr_get_new_above(&qdev->release_idr, release, 1, &handle); + if (idr_ret == -EAGAIN) { + goto again; + } + if (ret) { + *ret = release; + } + QXL_INFO(qdev, "allocated release %lld\n", handle); + release->id = handle; +release_fail: + mutex_unlock(&qdev->release_idr_mutex); + return handle; +} + +void +qxl_release_free_locked(struct qxl_device *qdev, + struct drm_qxl_release *release) +{ + int i; + + QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, + release->type, release->bo_count); + for (i = 0 ; i < release->bo_count; ++i) { + QXL_INFO(qdev, "release %llx\n", + release->bos[i]->tbo.addr_space_offset - DRM_FILE_OFFSET); + qxl_bo_free(release->bos[i]); + } + idr_remove(&qdev->release_idr, release->id); + kfree(release); +} + +void +qxl_release_add_res(struct qxl_device *qdev, struct drm_qxl_release *release, + struct qxl_bo *bo) +{ + if (release->bo_count >= QXL_MAX_RES) { + DRM_ERROR("exceeded max resource on a drm_qxl_release item\n"); + return; + } + release->bos[release->bo_count++] = bo; +} + +void *qxl_alloc_releasable(struct qxl_device *qdev, unsigned long size, + int type, struct drm_qxl_release **release, + struct qxl_bo **bo) +{ + int idr_ret; + void *ret; + + idr_ret = qxl_release_alloc(qdev, type, release); + //QXL_INFO(qdev, "allocating release_info id = %lld\n", idr_ret); + ret = qxl_allocnf(qdev, size, *release); + ((union qxl_release_info *)ret)->id = idr_ret; + *bo = (*release)->bos[(*release)->bo_count - 1]; + return ret; +} + +static struct qxl_drawable * +make_drawable(struct qxl_device *qdev, int surface, uint8_t type, + const struct qxl_rect *rect/* , pRegion clip */, + struct drm_qxl_release **release, + struct qxl_bo **drawable_bo) +{ + struct qxl_drawable *drawable; + int i; + + drawable = 
qxl_alloc_releasable(qdev, sizeof(*drawable), + QXL_RELEASE_DRAWABLE, release, + drawable_bo); + + drawable->type = type; + + drawable->surface_id = surface; /* Only primary for now */ + drawable->effect = QXL_EFFECT_OPAQUE; + drawable->self_bitmap = 0; + drawable->self_bitmap_area.top = 0; + drawable->self_bitmap_area.left = 0; + drawable->self_bitmap_area.bottom = 0; + drawable->self_bitmap_area.right = 0; + /* FIXME: add clipping */ + drawable->clip.type = SPICE_CLIP_TYPE_NONE; + + /* + * surfaces_dest[i] should apparently be filled out with the + * surfaces that we depend on, and surface_rects should be + * filled with the rectangles of those surfaces that we + * are going to use. + */ + for (i = 0; i < 3; ++i) + drawable->surfaces_dest[i] = -1; + + if (rect) + drawable->bbox = *rect; + + drawable->mm_time = qdev->rom->mm_clock; + + return drawable; +} + +/* TODO: bo per command is wasteful. add an offset */ +void +qxl_push_command_ring(struct qxl_device *qdev, struct qxl_bo *bo, uint32_t type) +{ + struct qxl_command cmd; + + cmd.type = type; + cmd.data = qxl_fb_physical_address(qdev, bo); + + qxl_ring_push(qdev->command_ring, &cmd); +} + +void +qxl_push_cursor_ring(struct qxl_device *qdev, struct qxl_bo *bo, uint32_t type) +{ + struct qxl_command cmd; + + cmd.type = type; + cmd.data = qxl_fb_physical_address(qdev, bo); + + qxl_ring_push(qdev->cursor_ring, &cmd); +} + +static void +push_drawable(struct qxl_device *qdev, struct qxl_bo *drawable_bo) +{ + qxl_push_command_ring(qdev, drawable_bo, QXL_CMD_DRAW); +} + +static struct qxl_palette *qxl_palette_create_1bit(struct drm_qxl_release *release, + const struct qxl_fb_image *qxl_fb_image) +{ + struct qxl_device *qdev = qxl_fb_image->qdev; + const struct fb_image *fb_image = &qxl_fb_image->fb_image; + uint32_t visual = qxl_fb_image->visual; + const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; + struct qxl_palette *ret; + uint32_t fgcolor, bgcolor; + static uint64_t unique; // we make no attempt to actually set this correctly globaly, + // since that would require tracking all of our palettes. + + ret = qxl_allocnf(qdev, sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, + release); + ret->num_ents = 2; + ret->unique = unique++; + if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { + // NB: this is the only used branch currently. + fgcolor = pseudo_palette[fb_image->fg_color]; + bgcolor = pseudo_palette[fb_image->bg_color]; + } else { + fgcolor = fb_image->fg_color; + bgcolor = fb_image->bg_color; + } + ret->ents[0] = bgcolor; + ret->ents[1] = fgcolor; + return ret; +} + +void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, + int stride /* filled in if 0 */) +{ + struct qxl_device *qdev = qxl_fb_image->qdev; + struct qxl_drawable *drawable; + struct qxl_rect rect; + struct qxl_image *image; + struct qxl_palette *palette; + const struct fb_image *fb_image = &qxl_fb_image->fb_image; + int x = fb_image->dx; + int y = fb_image->dy; + int width = fb_image->width; + int height = fb_image->height; + const char *src = fb_image->data; + int depth = fb_image->depth; + struct drm_qxl_release *release; + struct qxl_bo *image_bo; + struct qxl_bo *drawable_bo; + + //qxl_io_log(qdev, "%s: depth %d, bpp %d, width %d, height %d\n", __func__, fb_image->depth, + // fb_info ? 
fb_info->var.bits_per_pixel : -1, + // fb_image->width, fb_image->height); + + if (stride == 0) { + stride = depth * width / 8; + } + + rect.left = x; + rect.right = x + width; + rect.top = y; + rect.bottom = y + height; + + drawable = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release, &drawable_bo); + QXL_INFO(qdev, "drawable off %llx, release id %lld\n", + drawable_bo->tbo.addr_space_offset - DRM_FILE_OFFSET, + drawable->release_info.id); + + drawable->u.copy.src_area.top = 0; + drawable->u.copy.src_area.bottom = height; + drawable->u.copy.src_area.left = 0; + drawable->u.copy.src_area.right = width; + + drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT; + drawable->u.copy.scale_mode = 0; + drawable->u.copy.mask.flags = 0; + drawable->u.copy.mask.pos.x = 0; + drawable->u.copy.mask.pos.y = 0; + drawable->u.copy.mask.bitmap = 0; + + image = qxl_image_create( + qdev, release, &image_bo, + (const uint8_t *)src, 0, 0, width, height, depth, stride); + QXL_INFO(qdev, "image_bo offset %llx\n", + image_bo->tbo.addr_space_offset - DRM_FILE_OFFSET); + if (depth == 1) { + struct qxl_bo *palette_bo; + + palette = qxl_palette_create_1bit(release, qxl_fb_image); + palette_bo = release->bos[release->bo_count - 1]; + image->u.bitmap.palette = qxl_fb_physical_address(qdev, palette_bo); + } + drawable->u.copy.src_bitmap = + qxl_fb_physical_address(qdev, image_bo); + + //qxl_io_log(qdev, "%s: 0x%llX\n", __func__, qxl_bo_gpu_offset(drawable_bo)); + push_drawable(qdev, drawable_bo); +} + +#if 0 +static void debug_fill_image(uint8_t *data, long width, long height, long stride) +{ + int x, y; + static int c = 1; + + DRM_INFO("%s: data %p, w %ld, h %ld, stride %ld\n", __func__, data, + width, height, stride); + for (y = height; y > 0; y--) { + //DRM_INFO("%s: %d, data %p\n", __func__, y, data); + for (x = 0; x < width*4; x+=4) + data[x] += c; + data += stride; + } + c += 127; +} +#else +#define debug_fill_image(...) +#endif + +/* push a draw command using the given clipping rectangles as + * the sources from the shadow framebuffer. + * + * Right now implementing with a single draw and a clip list. Clip + * lists are known to be a problem performance wise, this can be solved + * by treating them differently in the server. + */ +void qxl_draw_dirty_fb(struct qxl_device *qdev, + struct qxl_framebuffer *qxl_fb, + struct qxl_bo *bo, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips, int inc) +{ + /* + * TODO: only a single monitor (stole this code from vmwgfx_kms.c) + * vmwgfx command created a blit command for each crt that any of + * the clip rects targeted. + */ + /* + * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should + * send a fill command instead, much cheaper. 
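+	 * (DRM_MODE_FB_DIRTY_ANNOTATE_FILL should mean the clip rects describe a
+	 * solid fill in the ioctl's "color" argument, so no image upload needed)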
+ * + * See include/drm/drm_mode.h + */ + struct drm_clip_rect *clips_ptr; + int i; + int left, right, top, bottom; + int width, height; + struct qxl_drawable *drawable; + struct qxl_rect drawable_rect; + struct qxl_rect *rects; + struct qxl_image *image; + int stride = qxl_fb->base.pitches[0]; + // depth is not actually interesting, we don't mask with it + int depth = qxl_fb->base.bits_per_pixel; + uint8_t *surface_base = bo->kptr; + struct drm_qxl_release *release; + struct qxl_bo *image_bo; + struct qxl_bo *drawable_bo; + + left = clips->x1; + right = clips->x2; + top = clips->y1; + bottom = clips->y2; + + /* skip the first clip rect */ + for (i = 1, clips_ptr = clips + inc; + i < num_clips; i++, clips_ptr += inc) { + left = min_t(int, left, (int)clips_ptr->x1); + right = max_t(int, right, (int)clips_ptr->x2); + top = min_t(int, top, (int)clips_ptr->y1); + bottom = max_t(int, bottom, (int)clips_ptr->y2); + } + + width = right - left; + height = bottom - top; + drawable_rect.left = left; + drawable_rect.right = right; + drawable_rect.top = top; + drawable_rect.bottom = bottom; + //drawable_rect.left = 0; + //drawable_rect.right = width; + //drawable_rect.top = 0; + //drawable_rect.bottom = height; + drawable = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, &release, &drawable_bo); + rects = drawable_set_clipping(qdev, drawable, num_clips, release); + + drawable->u.copy.src_area.top = 0; + drawable->u.copy.src_area.bottom = height; + drawable->u.copy.src_area.left = 0; + drawable->u.copy.src_area.right = width; + //drawable->u.copy.src_area.top = top; + //drawable->u.copy.src_area.bottom = bottom; + //drawable->u.copy.src_area.left = left; + //drawable->u.copy.src_area.right = right; + + drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT; + drawable->u.copy.scale_mode = 0; + drawable->u.copy.mask.flags = 0; + drawable->u.copy.mask.pos.x = 0; + drawable->u.copy.mask.pos.y = 0; + drawable->u.copy.mask.bitmap = 0; + + //DRM_INFO("%s: left %d, stride %d, top %d, width %d, height %d, depth %d", + // __func__, left, stride, top, width, height, depth); + debug_fill_image((surface_base + (left * 4) + (stride * top)), + width, height, stride); + //DRM_INFO("%s: before qxl_image_create", __func__); + image = qxl_image_create( + qdev, release, &image_bo, surface_base, + left, top, width, height, depth, stride); + //DRM_INFO("%s: image %p", __func__, image); + drawable->u.copy.src_bitmap = qxl_fb_physical_address(qdev, image_bo); + + clips_ptr = clips; + for (i = 0; i < num_clips; i++, clips_ptr += inc) { + rects[i].left = clips_ptr->x1; + rects[i].right = clips_ptr->x2; + rects[i].top = clips_ptr->y1; + rects[i].bottom = clips_ptr->y2; + //DRM_INFO("%s: clip %d: [%d,%d,%d,%d]", __func__, i, clips_ptr->x1, + // clips_ptr->x2, clips_ptr->y1, clips_ptr->y2); + } + + push_drawable(qdev, drawable_bo); +} + + +void qxl_draw_copyarea(struct qxl_device *qdev, + u32 width, u32 height, + u32 sx, u32 sy, + u32 dx, u32 dy) +{ + struct qxl_drawable *drawable; + struct qxl_rect rect; + struct drm_qxl_release *release; + struct qxl_bo *drawable_bo; + + rect.left = dx; + rect.top = dy; + rect.right = dx + width; + rect.bottom = dy + height; + drawable = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release, &drawable_bo); + drawable->u.copy_bits.src_pos.x = sx; + drawable->u.copy_bits.src_pos.y = sy; + + push_drawable(qdev, drawable_bo); +} + +void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) +{ + struct qxl_device *qdev = qxl_draw_fill_rec->qdev; + struct qxl_rect rect = qxl_draw_fill_rec->rect; 
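+	/* builds a QXL_DRAW_FILL drawable: a solid brush in "color" applied to
+	 * "rect" on the primary surface with the caller-supplied ROP descriptor,
+	 * see the fill setup below */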
+ uint32_t color = qxl_draw_fill_rec->color; + uint16_t rop = qxl_draw_fill_rec->rop; + struct qxl_drawable *drawable; + struct drm_qxl_release *release; + struct qxl_bo *drawable_bo; + + drawable = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release, &drawable_bo); + + drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; + drawable->u.fill.brush.u.color = color; + drawable->u.fill.rop_descriptor = rop; + drawable->u.fill.mask.flags = 0; + drawable->u.fill.mask.pos.x = 0; + drawable->u.fill.mask.pos.y = 0; + drawable->u.fill.mask.bitmap = 0; + + push_drawable(qdev, drawable_bo); +} diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c new file mode 100644 index 00000000000..48bb150b5c8 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -0,0 +1,129 @@ +/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */ +/* qxl_drv.c -- QXL driver -*- linux-c -*- + * + * Copyright 2011 Red Hat, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Dave Airlie <airlie@redhat.com> + * Alon Levy <alevy@redhat.com> + */ + +#include <linux/module.h> + +#include "drmP.h" +#include "drm.h" + +#include "qxl_drv.h" + +extern int qxl_max_ioctls; +static struct pci_device_id pciidlist[] = { + { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, + 0xffff00, 0 }, + { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8, + 0xffff00, 0 }, + { 0, 0, 0 }, +}; + +static struct drm_driver qxl_driver; +static struct pci_driver qxl_pci_driver; + +static int __devinit +qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + if (pdev->revision < 3) { + DRM_ERROR("qxl too old, doesn't support async io, use xf86-video-qxl in user mode"); + return -EINVAL; // ENODEV ? 
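+		/* (revision 3 is QXL_REVISION_STABLE_V10, presumably the first
+		 * revision exposing the qxl-3 *_ASYNC io ports this driver
+		 * relies on) */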
+ } + return drm_get_pci_dev(pdev, ent, &qxl_driver); +} + +static void +qxl_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); +} + +static struct pci_driver qxl_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = qxl_pci_probe, + .remove = qxl_pci_remove, +}; + +static const struct file_operations qxl_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .poll = drm_poll, + .fasync = drm_fasync, + .mmap = qxl_mmap, +}; + +static struct drm_driver qxl_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, + .dev_priv_size = 0, + .load = qxl_driver_load, + .unload = qxl_driver_unload, + .dumb_create = qxl_mode_dumb_create, + .dumb_map_offset = qxl_mode_dumb_mmap, + .dumb_destroy = qxl_mode_dumb_destroy, +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = qxl_debugfs_init, + .debugfs_cleanup = qxl_debugfs_takedown, +#endif + .gem_init_object = qxl_gem_object_init, + .gem_free_object = qxl_gem_object_free, + .gem_open_object = qxl_gem_object_open, + .gem_close_object = qxl_gem_object_close, + .fops = &qxl_fops, + .ioctls = qxl_ioctls, + .irq_handler = qxl_irq_handler, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = 0, + .minor = 1, + .patchlevel = 0, +}; + +static int __init qxl_init(void) +{ + qxl_driver.num_ioctls = qxl_max_ioctls; + return drm_pci_init(&qxl_driver, &qxl_pci_driver); +} + +static void __exit qxl_exit(void) +{ + drm_pci_exit(&qxl_driver, &qxl_pci_driver); +} + +module_init(qxl_init); +module_exit(qxl_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h new file mode 100644 index 00000000000..ac553961edc --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -0,0 +1,499 @@ +#ifndef QXL_DRV_H +#define QXL_DRV_H + +/* + * Definitions taken from spice-protocol, plus kernel driver specific bits. + */ + +#include <linux/workqueue.h> +#include <linux/firmware.h> +#include <linux/platform_device.h> + +#include "drmP.h" +#include "drm_crtc.h" +#include <ttm/ttm_bo_api.h> +#include <ttm/ttm_bo_driver.h> +#include <ttm/ttm_placement.h> +#include <ttm/ttm_module.h> + +#include "qxl_drm.h" +#include "qxl_dev.h" + +#define DRIVER_AUTHOR "Dave Airlie" + +#define DRIVER_NAME "qxl" +#define DRIVER_DESC "RH QXL" +#define DRIVER_DATE "20120117" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + +#define QXL_INFO_IO + +#define QXL_NUM_OUTPUTS 1 + +extern int qxl_debug_level; +extern int qxl_debug_disable_fb; + +#ifdef QXL_INFO_IO + //DRM_INFO(fmt, __VA_ARGS__); +#define QXL_INFO(qdev, fmt, ...) do { \ + if (qxl_debug_level) { \ + qxl_io_log(qdev, fmt, __VA_ARGS__); \ + } \ + } while (0) +#define QXL_INFO_ONCE(qdev, fmt, ...) do { \ + static int done; \ + if (!done) { \ + done = 1; \ + QXL_INFO(qdev, fmt, __VA_ARGS__); \ + } \ + } while (0) + +#else +#define QXL_INFO(qdev, fmt, ...) DRM_INFO(fmt, __VA_ARGS__) +#define QXL_INFO_ONCE(qdev, fmt, ...) 
printk_once(fmt, __VA_ARGS) +#endif + + +#define DRM_FILE_OFFSET 0x100000000ULL +#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT) + +struct qxl_bo { + /* Protected by gem.mutex */ + struct list_head list; + /* Protected by tbo.reserved */ + u32 placements[3]; + struct ttm_placement placement; + struct ttm_buffer_object tbo; + struct ttm_bo_kmap_obj kmap; + unsigned pin_count; + void *kptr; + u32 pitch; + int surface_reg; + /* Constant after initialization */ + struct qxl_device *qdev; + struct drm_gem_object gem_base; +}; +#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) + + +struct qxl_fence_driver { + atomic_t seq; + uint32_t last_seq; + wait_queue_head_t queue; + rwlock_t lock; + struct list_head created; + struct list_head emited; + struct list_head signaled; +}; + +struct qxl_fence { + struct qxl_device *qdev; + struct kref kref; + struct list_head list; + uint32_t seq; + unsigned long timeout; + bool emited; + bool signaled; +}; + +int qxl_fence_driver_init(struct qxl_device *qdev); +void qxl_fence_driver_fini(struct qxl_device *qdev); + +struct qxl_gem { + struct mutex mutex; + struct list_head objects; +}; + +struct qxl_crtc { + struct drm_crtc base; +}; + +struct qxl_output { + int index; + struct drm_connector base; + struct drm_encoder enc; +}; + +struct qxl_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *obj; +}; + +#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base) +#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base) +#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base) +#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base) + +struct qxl_mman { + struct ttm_bo_global_ref bo_global_ref; + struct drm_global_reference mem_global_ref; + bool mem_global_referenced; + struct ttm_bo_device bdev; +}; + +struct qxl_mode_info { + int num_modes; + struct qxl_mode *modes; + bool mode_config_initialized; + + /* pointer to fbdev info structure */ + struct qxl_fbdev *qfbdev; +}; + + +struct qxl_memslot { + uint8_t generation; + uint64_t start_phys_addr; + uint64_t end_phys_addr; + uint64_t high_bits; +}; + +enum { + QXL_RELEASE_DRAWABLE, + QXL_RELEASE_SURFACE_CMD, + QXL_RELEASE_CURSOR_CMD, +}; + +/* drm_ prefix to differentiate from qxl_release_info in spice-protocol/qxl_dev.h */ +#define QXL_MAX_RES 6 +struct drm_qxl_release { + int id; + int type; + int bo_count; + struct qxl_bo *bos[QXL_MAX_RES]; +}; + +/* all information required for an image blit. used instead + * of fb_image & fb_info to do a single allocation when we need + * to queue the work. 
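+ * (the fbcon blit/fill entry points may run in interrupt context, hence
+ * the deferral to the fb_workqueue in struct qxl_device below)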
*/ +struct qxl_fb_image { + struct qxl_device *qdev; + uint32_t pseudo_palette[16]; + struct fb_image fb_image; + uint32_t visual; +}; + +struct qxl_draw_fill { + struct qxl_device *qdev; + struct qxl_rect rect; + uint32_t color; + uint16_t rop; +}; + +enum { + QXL_FB_WORK_ITEM_INVALID, + QXL_FB_WORK_ITEM_IMAGE, + QXL_FB_WORK_ITEM_DRAW_FILL, +}; + +struct qxl_fb_work_item { + struct list_head head; + int type; + union { + struct qxl_fb_image qxl_fb_image; + struct qxl_draw_fill qxl_draw_fill; + }; +}; + +struct qxl_device { + struct device *dev; + struct drm_device *ddev; + struct pci_dev *pdev; + unsigned long flags; + + resource_size_t vram_base, vram_size; + resource_size_t surfaceram_base, surfaceram_size; + resource_size_t rom_base, rom_size; + struct qxl_rom *rom; + + struct qxl_mode *modes; + struct qxl_bo *monitors_config_bo; + struct qxl_monitors_config *monitors_config; + + /* last received client_monitors_config */ + struct qxl_monitors_config *client_monitors_config; + + int io_base; + void *ram; + struct qxl_bo *surface0_bo; + void *surface0_shadow; + struct qxl_mman mman; +// struct qxl_fence_driver fence; + struct qxl_gem gem; + struct qxl_mode_info mode_info; + + // last created framebuffer with fb_create + // only used by debugfs dumbppm + struct qxl_framebuffer *active_user_framebuffer; + + struct fb_info *fbdev_info; + struct qxl_framebuffer *fbdev_qfb; + void *ram_physical; + + struct qxl_ring *release_ring; + struct qxl_ring *command_ring; + struct qxl_ring *cursor_ring; + + struct qxl_ram_header *ram_header; + bool mode_set; + + bool primary_created; + unsigned primary_width; + unsigned primary_height; + + struct qxl_memslot *mem_slots; + uint8_t n_mem_slots; + + uint8_t main_mem_slot; + uint8_t surfaces_mem_slot; + uint8_t slot_id_bits; + uint8_t slot_gen_bits; + uint64_t va_slot_mask; + + uint8_t vram_mem_slot; + + struct idr release_idr; + struct mutex release_idr_mutex; + struct mutex async_io_mutex; + + // framebuffer. 
workqueue to avoid bo allocation in interrupt context + struct workqueue_struct *fb_workqueue; + spinlock_t fb_workqueue_spinlock; + struct work_struct fb_work; + struct list_head fb_work_item_pending; + struct list_head fb_work_item_free; + struct qxl_fb_work_item fb_work_items[16]; + + // interrupt handling + atomic_t irq_received; + atomic_t irq_received_display; + atomic_t irq_received_cursor; + atomic_t irq_received_io_cmd; + unsigned irq_received_error; + wait_queue_head_t display_event; + wait_queue_head_t cursor_event; + wait_queue_head_t io_cmd_event; + struct work_struct client_monitors_config_work; +}; + +/* forward declaration for QXL_INFO_IO */ +void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); + +extern struct drm_ioctl_desc qxl_ioctls[]; +extern int qxl_max_ioctl; + +int qxl_driver_load(struct drm_device *dev, unsigned long flags); +int qxl_driver_unload(struct drm_device *dev); + +int qxl_modeset_init(struct qxl_device *qdev); +void qxl_modeset_fini(struct qxl_device *qdev); + +int qxl_bo_init(struct qxl_device *qdev); +void qxl_bo_fini(struct qxl_device *qdev); + +struct qxl_ring *qxl_ring_create (struct qxl_ring_header *header, + int element_size, + int n_elements, + int prod_notify); +void qxl_ring_free(struct qxl_ring *ring); +extern void *qxl_allocnf(struct qxl_device *qdev, unsigned long size, + struct drm_qxl_release *release); + +static inline uint64_t +qxl_physical_address(struct qxl_device *qdev, uint64_t kptr) +{ + struct qxl_memslot *slot = &(qdev->mem_slots[qdev->main_mem_slot]); + + return slot->high_bits | kptr; +} + +static inline uint64_t +qxl_fb_physical_address(struct qxl_device *qdev, struct qxl_bo *bo) +{ + uint64_t offset = bo->tbo.offset; + + QXL_INFO(qdev, "physical from tbo %llx (%llx, %llx)\n", + bo->tbo.addr_space_offset, offset, + bo->tbo.offset); + BUG_ON(bo->tbo.addr_space_offset & 0xf000000000000000); + return qxl_physical_address(qdev, offset); +} + +static inline void * +qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical) +{ + QXL_INFO(qdev, "not implemented (%lu)\n", physical); + return 0; +} + +static inline uint64_t +qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo, + unsigned long offset, uint8_t slot_id) +{ + struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]); + + // TODO - need to hold one of the locks to read tbo.offset + return slot->high_bits | (bo->tbo.offset + offset); +} + +/* qxl_fb.c */ +#define QXLFB_CONN_LIMIT 1 + +int qxl_fbdev_init(struct qxl_device *qdev); +void qxl_fbdev_fini(struct qxl_device *qdev); +int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, + struct drm_file *file_priv, + uint32_t *handle); + +/* qxl_display.c */ +void +qxl_framebuffer_init(struct drm_device *dev, + struct qxl_framebuffer *rfb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); +void qxl_display_read_client_monitors_config(struct qxl_device *qdev); +void qxl_send_monitors_config(struct qxl_device *qdev); + +/* used by qxl_debugfs only */ +void qxl_crtc_set_from_client_monitors_config(struct qxl_device *qdev); +void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count); + +/* qxl_gem.c */ +int qxl_gem_init(struct qxl_device *qdev); +void qxl_gem_fini(struct qxl_device *qdev); +int qxl_gem_object_create(struct qxl_device *qdev, int size, + int alignment, int initial_domain, + bool discardable, bool kernel, + struct drm_gem_object **obj); +int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, + uint64_t *gpu_addr); +void 
qxl_gem_object_unpin(struct drm_gem_object *obj); +int qxl_gem_object_create_with_handle(struct qxl_device *qdev, + struct drm_file *file_priv, + u32 domain, + size_t size, + struct qxl_bo **qobj, + uint32_t *handle); +int qxl_gem_object_init(struct drm_gem_object *obj); +void qxl_gem_object_free(struct drm_gem_object *gobj); +int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); +void qxl_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv); +void qxl_bo_force_delete(struct qxl_device *qdev); +int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); + +/* qxl_dumb.c */ +int qxl_mode_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int qxl_mode_dumb_destroy(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle); +int qxl_mode_dumb_mmap(struct drm_file *filp, + struct drm_device *dev, + uint32_t handle, uint64_t *offset_p); + + +/* qxl ttm */ +int qxl_ttm_init(struct qxl_device *qdev); +void qxl_ttm_fini(struct qxl_device *qdev); +int qxl_mmap(struct file *filp, struct vm_area_struct *vma); + +/* qxl image */ + +struct qxl_image *qxl_image_create(struct qxl_device *qdev, + struct drm_qxl_release *release, + struct qxl_bo **image_bo, + const uint8_t *data, + int x, int y, int width, int height, + int depth, int stride); +void qxl_image_destroy(struct qxl_device *qdev, + struct qxl_image *image); +void qxl_update_screen(struct qxl_device *qxl); + +/* qxl io operations (qxl_cmd.c) */ + +void qxl_io_create_primary(struct qxl_device *qdev, + unsigned width, unsigned height); +void qxl_io_destroy_primary(struct qxl_device *qdev); +void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id); +void qxl_io_notify_oom(struct qxl_device *qdev); +void qxl_io_update_area(struct qxl_device *qdev, uint32_t surface_id, + const struct qxl_rect *area); +void qxl_io_reset(struct qxl_device *qdev); +void qxl_io_monitors_config(struct qxl_device *qdev); +void qxl_ring_push(struct qxl_ring *ring, const void *new_elt); + + +/* + * qxl_bo_add_resource. 
+ * + */ +void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); + +/* used both directly via qxl_draw and via ioctl execbuffer */ +void *qxl_alloc_releasable(struct qxl_device *qdev, unsigned long size, + int type, struct drm_qxl_release **release, + struct qxl_bo **bo); +void +qxl_push_command_ring(struct qxl_device *qdev, struct qxl_bo *bo, uint32_t type); +void +qxl_push_cursor_ring(struct qxl_device *qdev, struct qxl_bo *bo, uint32_t type); + +/* qxl drawing commands */ + +void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, + int stride /* filled in if 0 */); + +void qxl_draw_dirty_fb(struct qxl_device *qdev, + struct qxl_framebuffer *qxl_fb, + struct qxl_bo *bo, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips, int inc); + +void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec); + +void qxl_draw_copyarea(struct qxl_device *qdev, + u32 width, u32 height, + u32 sx, u32 sy, + u32 dx, u32 dy); + +uint64_t +qxl_release_alloc(struct qxl_device *qdev, int type, + struct drm_qxl_release **ret); + +void qxl_release_free_locked(struct qxl_device *qdev, + struct drm_qxl_release *release); +void qxl_release_add_res(struct qxl_device *qdev, struct drm_qxl_release *release, + struct qxl_bo *bo); + +/* used by qxl_debugfs_release */ +struct drm_qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, + uint64_t id); + +int qxl_garbage_collect(struct qxl_device *qdev); + +/* debugfs */ + +int qxl_debugfs_init(struct drm_minor *minor); +void qxl_debugfs_takedown(struct drm_minor *minor); + +/* qxl_irq.c */ +int qxl_irq_init(struct qxl_device *qdev); +irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS); + +/* qxl_fb.c */ +int qxl_fb_init(struct qxl_device *qdev); + +/* not static for debugfs */ +int qxl_fb_queue_imageblit(struct qxl_device *qdev, + struct qxl_fb_image *qxl_fb_image, + struct fb_info *info, + const struct fb_image *image); +int qxl_fb_queue_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec); + +#endif diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c new file mode 100644 index 00000000000..79124057e02 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -0,0 +1,62 @@ +#include "qxl_drv.h" +#include "qxl_object.h" + +/* dumb ioctls implementation */ + +int qxl_mode_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct qxl_device *qdev = dev->dev_private; + struct qxl_bo *qobj; + uint32_t handle; + int r; + + args->pitch = args->width * ((args->bpp + 1) / 8); + args->size = args->pitch * args->height; + args->size = ALIGN(args->size, PAGE_SIZE); + + r = qxl_gem_object_create_with_handle(qdev, file_priv, + QXL_GEM_DOMAIN_VRAM, + args->size, &qobj, + &handle); + DRM_INFO("%s: width %d, height %d, bpp %d, pitch %d, size %lld, %s\n", __func__, + args->width, args->height, args->bpp, + args->pitch, args->size, r ? "failed" : "success"); + if (r) + return r; + args->handle = handle; + DRM_INFO("%s: kptr %p\n", __func__, qobj->kptr); + return 0; +} + +int qxl_mode_dumb_destroy(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file_priv, handle); +} + +int qxl_mode_dumb_mmap(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle, uint64_t *offset_p) +{ + struct drm_gem_object *gobj; + struct qxl_bo *qobj; + + BUG_ON(!offset_p); + gobj = drm_gem_object_lookup(dev, file_priv, handle); + DRM_INFO("%s: %d, %s\n", __func__, handle, gobj ? 
"success" : "failed"); + if (gobj == NULL) { + return -ENOENT; + } + qobj = gem_to_qxl_bo(gobj); + *offset_p = qxl_bo_mmap_offset(qobj); + DRM_INFO("%s: %p, %lld| %lld, %ld, %ld\n", __func__, gobj, *offset_p, + qobj->tbo.addr_space_offset, + qobj->tbo.vm_node ? + qobj->tbo.vm_node->start : -1, + qobj->tbo.num_pages); + drm_gem_object_unreference_unlocked(gobj); + return 0; +} diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c new file mode 100644 index 00000000000..c2a9fb51e7b --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -0,0 +1,784 @@ +/* + * Copyright © 2007 David Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * David Airlie + */ + /* + * Modularization + */ + +#include <linux/module.h> +#include <linux/fb.h> + +#include "drmP.h" +#include "drm.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "qxl_drm.h" +#include "qxl_drv.h" + +#include "qxl_object.h" +#include "drm_fb_helper.h" + +#define QXL_DIRTY_DELAY (HZ / 30) + +struct qxl_fbdev { + struct drm_fb_helper helper; + struct qxl_framebuffer qfb; + struct list_head fbdev_list; + struct qxl_device *qdev; + + /* dirty memory logging */ + struct { + spinlock_t lock; + bool active; + unsigned x1; + unsigned y1; + unsigned x2; + unsigned y2; + } dirty; +}; + +static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, + struct qxl_device *qdev, struct fb_info *info, + const struct fb_image *image) +{ + qxl_fb_image->qdev = qdev; + if (info) { + qxl_fb_image->visual = info->fix.visual; + if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR || + qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR) { + memcpy(&qxl_fb_image->pseudo_palette, info->pseudo_palette, + sizeof(qxl_fb_image->pseudo_palette)); + } + } else { + /* fallback */ + if (image->depth == 1) { + qxl_fb_image->visual = FB_VISUAL_MONO10; + } else { + qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR; + } + } + if (image) { + memcpy(&qxl_fb_image->fb_image, image, + sizeof(qxl_fb_image->fb_image)); + } +} + +static void qxl_fb_dirty_flush(struct fb_info *info) +{ + struct qxl_fbdev *qfbdev = info->par; + struct qxl_device *qdev = qfbdev->qdev; + struct qxl_fb_image qxl_fb_image; + struct fb_image *image = &qxl_fb_image.fb_image; + u32 x1, x2, y1, y2; + int stride = qfbdev->qfb.base.pitches[0] * 4; /* TODO: hard coding 32 bpp */ + + x1 = qfbdev->dirty.x1; + x2 = qfbdev->dirty.x2; + y1 = qfbdev->dirty.y1; + y2 = qfbdev->dirty.y2; + /* + * we are using a shadow 
draw buffer, at qdev->surface0_shadow + */ + qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2); + image->dx = x1; + image->dy = y1; + image->width = x2 - x1; + image->height = y2 - y1; + image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized warnings */ + image->bg_color = 0; + image->depth = 32; /* TODO: take from somewhere? */ + image->cmap.start = 0; + image->cmap.len = 0; + image->cmap.red = NULL; + image->cmap.green = NULL; + image->cmap.blue = NULL; + image->cmap.transp = NULL; + image->data = qdev->surface0_shadow + (x1 * 4) + (stride * y1); + + qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL); + qxl_draw_opaque_fb(&qxl_fb_image, stride); + qfbdev->dirty.x1 = 0; + qfbdev->dirty.x2 = 0; + qfbdev->dirty.y1 = 0; + qfbdev->dirty.y2 = 0; +} + +static void qxl_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ + struct qxl_fbdev *qfbdev = info->par; + unsigned long start, end, min, max; + struct page *page; + int y1, y2; + + min = ULONG_MAX; + max = 0; + list_for_each_entry(page, pagelist, lru) { + start = page->index << PAGE_SHIFT; + end = start + PAGE_SIZE - 1; + min = min(min, start); + max = max(max, end); + } + + if (min < max) { + y1 = min / info->fix.line_length; + y2 = (max / info->fix.line_length) + 1; + + // TODO: add spin lock? + //spin_lock_irqsave(&qfbdev->dirty.lock, flags); + qfbdev->dirty.x1 = 0; + qfbdev->dirty.y1 = y1; + qfbdev->dirty.x2 = info->var.xres; + qfbdev->dirty.y2 = y2; + //spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); + } + + qxl_fb_dirty_flush(info); +}; + + +struct fb_deferred_io qxl_defio = { + .delay = QXL_DIRTY_DELAY, + .deferred_io = qxl_deferred_io, +}; + +static void qxl_fb_do_work_items(struct qxl_device *qdev); + +static void qxl_fb_fillrect(struct fb_info *info, + const struct fb_fillrect *fb_rect) +{ + struct qxl_fbdev *qfbdev = info->par; + struct qxl_device *qdev = qfbdev->qdev; + struct qxl_rect rect; + uint32_t color; + int x = fb_rect->dx; + int y = fb_rect->dy; + int width = fb_rect->width; + int height = fb_rect->height; + uint16_t rop; + struct qxl_draw_fill qxl_draw_fill_rec; + + if (qxl_debug_disable_fb) { + QXL_INFO_ONCE(qdev, "(skipped) %s:%d\n", __func__, __LINE__); + return; + } + + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR ) + color = ((u32 *) (info->pseudo_palette))[fb_rect->color]; + else + color = fb_rect->color; + rect.left = x; + rect.right = x + width; + rect.top = y; + rect.bottom = y + height; + switch (fb_rect->rop) { + case ROP_XOR: + rop = SPICE_ROPD_OP_XOR; + break; + case ROP_COPY: + rop = SPICE_ROPD_OP_PUT; + break; + default: + printk( KERN_ERR "qxl_fb_fillrect(): unknown rop, defaulting to SPICE_ROPD_OP_PUT\n"); + rop = SPICE_ROPD_OP_PUT; + } + qxl_draw_fill_rec.qdev = qdev; + qxl_draw_fill_rec.rect = rect; + qxl_draw_fill_rec.color = color; + qxl_draw_fill_rec.rop = rop; + if (in_interrupt()) { + unsigned long flags; + qxl_io_log(qdev, "%s: TODO use RCU, mysterious locks with spin_lock\n", + __func__); + return; + spin_lock_irqsave(&qdev->fb_workqueue_spinlock, flags); + if (qxl_fb_queue_draw_fill(&qxl_draw_fill_rec)) { + qxl_io_log(qdev, "%s: failed to queue work item in interrupt context.\n" + "fb_fillrect = %x\n", __FUNCTION__, fb_rect); + } + spin_unlock_irqrestore(&qdev->fb_workqueue_spinlock, flags); + return; + } + /* ensure proper order of rendering operations - TODO: must do this + * for everything. 
*/ + qxl_fb_do_work_items(qdev); + qxl_draw_fill(&qxl_draw_fill_rec); + //cfb_fillrect(info, rect); + //qxl_update_screen(qfbdev->qdev); +} + +static void qxl_fb_copyarea(struct fb_info *info, + const struct fb_copyarea *region) +{ + struct qxl_fbdev *qfbdev = info->par; + + if (qxl_debug_disable_fb) { + QXL_INFO_ONCE(qfbdev->qdev, "skipped %s:%d\n", __func__, __LINE__); + return; + } + qxl_draw_copyarea(qfbdev->qdev, + region->width, region->height, + region->sx, region->sy, + region->dx, region->dy); +} + +#if 0 +static u32 checksum(struct fb_info *info) +{ + u8 *addr; + u32 sum = 0; + // for now checksum the pixmap - later check other stuff + for (addr = info->pixmap.addr; addr < info->pixmap.addr + info->pixmap.size; ++addr) + sum += *addr; + return sum; +} +#endif + +static struct qxl_fb_work_item *get_work_item(struct qxl_device *qdev, int type) +{ + struct qxl_fb_work_item *item; + + if (list_empty(&qdev->fb_work_item_free)) { + // not much we can do, really - log this to qemu at least + return NULL; + } + item = list_first_entry(&qdev->fb_work_item_free, struct qxl_fb_work_item, head); + list_del(&item->head); + item->type = type; + return item; +} + +static void push_fb_work_item(struct qxl_device *qdev, struct qxl_fb_work_item *item) +{ + list_add_tail(&item->head, &qdev->fb_work_item_pending); + queue_work(qdev->fb_workqueue, &qdev->fb_work); +} + +/* not static for debugfs */ +int qxl_fb_queue_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) +{ + struct qxl_device *qdev = qxl_draw_fill_rec->qdev; + struct qxl_fb_work_item *item = get_work_item(qdev, QXL_FB_WORK_ITEM_DRAW_FILL); + + if (!item) { + return -ENOMEM; + } + item->qxl_draw_fill = *qxl_draw_fill_rec; + push_fb_work_item(qdev, item); + return 0; +} + +/* not static for debugfs */ +int qxl_fb_queue_imageblit(struct qxl_device *qdev, + struct qxl_fb_image *qxl_fb_image, + struct fb_info *info, + const struct fb_image *image) +{ + struct qxl_fb_work_item *item = get_work_item(qdev, QXL_FB_WORK_ITEM_IMAGE); + + if (!item) { + return -ENOMEM; + } + if (!qxl_fb_image) { + qxl_fb_image_init(&item->qxl_fb_image, qdev, info, image); + } else { + /* debugfs path */ + memcpy(&item->qxl_fb_image, qxl_fb_image, sizeof(*qxl_fb_image)); + } + push_fb_work_item(qdev, item); + return 0; +} + +static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) +{ + //DRM_INFO("%s:%d %d,%d (%d,%d)\n", __func__, __LINE__, + // image->width, image->height, image->dx, image->dy); + qxl_draw_opaque_fb(qxl_fb_image, 0); +} + +static void qxl_fb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + struct qxl_fbdev *qfbdev = info->par; + struct qxl_device *qdev = qfbdev->qdev; + struct qxl_fb_image qxl_fb_image; + + if (qxl_debug_disable_fb) { + QXL_INFO_ONCE(qfbdev->qdev, "%s: skipping\n", __FUNCTION__); + return; + } + if (in_interrupt()) { + /* we cannot do any ttm_bo allocation since that will fail on + * ioremap_wc..__get_vm_area_node, so queue the work item instead + * This can happen from printk inside an interrupt context, i.e.: + * smp_apic_timer_interrupt..check_cpu_stall */ + unsigned long flags; + qxl_io_log(qdev, "%s: TODO use RCU, mysterious locks with spin_lock\n", + __func__); + return; + spin_lock_irqsave(&qdev->fb_workqueue_spinlock, flags); + if (qxl_fb_queue_imageblit(qdev, NULL, info, image)) { + qxl_io_log(qdev, "%s: failed to queue work item in interrupt context.\n" + "info = %x, image = %x\n", __FUNCTION__, info, image); + } + spin_unlock_irqrestore(&qdev->fb_workqueue_spinlock, flags); + return; + } + 
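+	/*
+	 * Process context: work items queued earlier from interrupt context
+	 * may still be sitting on fb_work_item_pending, so drain them with
+	 * qxl_fb_do_work_items() before issuing this blit directly;
+	 * otherwise the deferred operations would be rendered after a newer
+	 * one and show up out of order.
+	 */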
/* ensure proper order of rendering operations - TODO: must do this + * for everything. */ + qxl_fb_do_work_items(qfbdev->qdev); + qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); + qxl_fb_imageblit_safe(&qxl_fb_image); +} + +static void qxl_fb_do_work_items(struct qxl_device *qdev) +{ + struct qxl_fb_work_item *item; + struct qxl_fb_work_item *n; + unsigned long flags; + + if (in_interrupt()) { + qxl_io_log(qdev, "%s: wtf - should not happen\n", __FUNCTION__); + return; + } + + spin_lock_irqsave(&qdev->fb_workqueue_spinlock, flags); + list_for_each_entry_safe(item, n, &qdev->fb_work_item_pending, head) { + spin_unlock_irqrestore(&qdev->fb_workqueue_spinlock, flags); + switch (item->type) { + case QXL_FB_WORK_ITEM_IMAGE: + qxl_fb_imageblit_safe(&item->qxl_fb_image); + break; + case QXL_FB_WORK_ITEM_DRAW_FILL: + qxl_draw_fill(&item->qxl_draw_fill); + break; + default: + qxl_io_log(qdev, "%s: invalid fb work item type %d\n", item->type); + } + spin_lock_irqsave(&qdev->fb_workqueue_spinlock, flags); + list_del(&item->head); + list_add_tail(&item->head, &qdev->fb_work_item_free); + spin_unlock_irqrestore(&qdev->fb_workqueue_spinlock, flags); + } + spin_unlock_irqrestore(&qdev->fb_workqueue_spinlock, flags); +} + +static void qxl_fb_work(struct work_struct *work) +{ + struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); + + qxl_fb_do_work_items(qdev); +} + +int qxl_fb_init(struct qxl_device *qdev) +{ + int i; + + qdev->fb_workqueue = alloc_workqueue("qxl_fb", WQ_NON_REENTRANT, 0); + if (!qdev->fb_workqueue) + return -ENOMEM; + INIT_LIST_HEAD(&qdev->fb_work_item_pending); + INIT_LIST_HEAD(&qdev->fb_work_item_free); + INIT_WORK(&qdev->fb_work, qxl_fb_work); + spin_lock_init(&qdev->fb_workqueue_spinlock); + /* no kmalloc in interrupt context - not sure if I'm fixing a problem + * or introducing a limitation */ + for (i = 0 ; i < sizeof(qdev->fb_work_items) / sizeof(qdev->fb_work_items[0]) ; ++i) { + list_add_tail(&qdev->fb_work_items[i].head, &qdev->fb_work_item_free); + } + return 0; +} + + +struct qxl_fb_par { + u32 pseudo_palette[17]; /* why not 257? Xorg wants 256 elements. 
*/ +}; + +static int qxl_fb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static int qxl_fb_blank(int blank, struct fb_info *info) +{ + return 0; +} + + +static struct fb_ops qxlfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, // TODO: like vmwgfx + .fb_fillrect = qxl_fb_fillrect, + .fb_copyarea = qxl_fb_copyarea, + .fb_imageblit = qxl_fb_imageblit, + .fb_pan_display = qxl_fb_pan_display, + .fb_blank = qxl_fb_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + +static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj) +{ + struct qxl_bo *qbo = gem_to_qxl_bo(gobj); + int ret; + + ret = qxl_bo_reserve(qbo, false); + if (likely(ret == 0)) { + qxl_bo_kunmap(qbo); + qxl_bo_unpin(qbo); + qxl_bo_unreserve(qbo); + } + drm_gem_object_unreference_unlocked(gobj); +} + +int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, + struct drm_file *file_priv, + uint32_t *handle) +{ + int r; + struct drm_gem_object *gobj = qdev->fbdev_qfb->obj; + + BUG_ON(!gobj); + /* drm_get_handle_create adds a reference - good */ + r = drm_gem_handle_create(file_priv, gobj, handle); + if (r) { + return r; + } + return 0; +} + +static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object **gobj_p) +{ + struct qxl_device *qdev = qfbdev->qdev; + struct drm_gem_object *gobj = NULL; + struct qxl_bo *qbo = NULL; + int ret; + int aligned_size, size; + int height = mode_cmd->height; + int bpp; + int depth; + + drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth); + + size = mode_cmd->pitches[0] * height; + aligned_size = ALIGN(size, PAGE_SIZE); + // TODO: unallocate and reallocate surface0 for real. 
Hack to just + // have a large enough surface0 for 1024x768 Xorg 32bpp mode + ret = qxl_gem_object_create(qdev, 1024*768*4 /* aligned_size */, 0, + QXL_GEM_DOMAIN_VRAM, + false, /* is discardable */ + false, /* is kernel (false means device) */ + &gobj); + if (ret) { + printk(KERN_ERR "failed to allocate framebuffer (%d)\n", + aligned_size); + return -ENOMEM; + } + qbo = gem_to_qxl_bo(gobj); + + ret = qxl_bo_reserve(qbo, false); + if (unlikely(ret != 0)) + goto out_unref; + ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_VRAM, NULL); + if (ret) { + qxl_bo_unreserve(qbo); + goto out_unref; + } + ret = qxl_bo_kmap(qbo, NULL); + qxl_bo_unreserve(qbo); // unreserve, will be mmaped + if (ret) { + goto out_unref; + } + + *gobj_p = gobj; + return 0; +out_unref: + qxlfb_destroy_pinned_object(gobj); + *gobj_p = NULL; + return ret; +} + +static int qxlfb_create(struct qxl_fbdev *qfbdev, + struct drm_fb_helper_surface_size *sizes) +{ + struct qxl_device *qdev = qfbdev->qdev; + struct fb_info *info; + struct drm_framebuffer *fb = NULL; + struct drm_mode_fb_cmd2 mode_cmd; + struct drm_gem_object *gobj = NULL; + struct qxl_bo *qbo = NULL; + struct device *device = &qdev->pdev->dev; + int ret; + int size; + int bpp = sizes->surface_bpp; + int depth = sizes->surface_depth; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); + + ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj); + qdev->surface0_bo = qbo = gem_to_qxl_bo(gobj); + QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width, + mode_cmd.height, mode_cmd.pitches[0]); + qdev->surface0_shadow = kmalloc(mode_cmd.pitches[0] * mode_cmd.height, GFP_KERNEL); + BUG_ON(!qdev->surface0_shadow); /* TODO: what's the usual response to memory allocation errors? that one. */ + QXL_INFO(qdev, "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", + qxl_bo_gpu_offset(qdev->surface0_bo), + qxl_bo_mmap_offset(qdev->surface0_bo), + qdev->surface0_bo->kptr, + qdev->surface0_shadow); + size = mode_cmd.pitches[0] * mode_cmd.height; + + info = framebuffer_alloc(0, device); + if (info == NULL) { + ret = -ENOMEM; + goto out_unref; + } + + info->par = qfbdev; + + qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj); + + fb = &qfbdev->qfb.base; + + /* setup helper with fb data */ + qfbdev->helper.fb = fb; + qfbdev->helper.fbdev = info; + + // TODO: memset_io/memset causes 100k exits/sec for 60 seconds. wtf? + //memset_io(qbo->kptr, 0xff, size); // TODO - 0xff on purpose? + + strcpy(info->fix.id, "qxldrmfb"); + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + + info->flags = FBINFO_DEFAULT; + info->fbops = &qxlfb_ops; + + // TODO: using gobj->size in various places in this function. Not sure + // what the difference between the different sizes is. +// tmp = fb_gpuaddr - qdev->mc.vram_location; + // info->fix.smem_start = qdev->mc.aper_base + tmp; + info->fix.smem_start = qdev->vram_base; // TODO - correct? 
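+	/*
+	 * screen_base below points at the kmalloc()ed shadow buffer
+	 * (qdev->surface0_shadow) rather than at the pinned VRAM object:
+	 * fbcon draws into system RAM, and qxl_fb_dirty_flush() later reads
+	 * the dirty rectangle back out of surface0_shadow when building the
+	 * QXL image command for the host.
+	 */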
+ info->fix.smem_len = gobj->size; + info->screen_base = qdev->surface0_shadow; + info->screen_size = gobj->size; + + drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width, sizes->fb_height); + + /* setup aperture base/size for vesafb takeover */ + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto out_unref; + } + info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base; + info->apertures->ranges[0].size = qdev->vram_size; + + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; + info->pixmap.size = 64*1024; /* TODO: not correct - must be taken from pci */ + info->pixmap.buf_align = 8; + info->pixmap.access_align = 32; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; + if (info->screen_base == NULL) { + ret = -ENOSPC; + goto out_unref; + } + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto out_unref; + } + + info->fbdefio = &qxl_defio; + fb_deferred_io_init(info); + + qdev->fbdev_info = info; + qdev->fbdev_qfb = &qfbdev->qfb; + DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); + // DRM_INFO("vram apper at 0x%lX\n", (unsigned long)qdev->mc.aper_base); + DRM_INFO("size %lu\n", (unsigned long)info->screen_size); + DRM_INFO("fb depth is %d\n", fb->depth); + DRM_INFO(" pitch is %d\n", fb->pitches[0]); + DRM_INFO(" width is %d\n", fb->width); + DRM_INFO(" height is %d\n", fb->height); + + return 0; + +out_unref: + if (qbo) { + ret = qxl_bo_reserve(qbo, false); + if (likely(ret == 0)) { + qxl_bo_kunmap(qbo); + qxl_bo_unpin(qbo); + qxl_bo_unreserve(qbo); + } + } + if (fb && ret) { + drm_gem_object_unreference(gobj); + drm_framebuffer_cleanup(fb); + kfree(fb); + } + drm_gem_object_unreference(gobj); + return ret; +} + +static int qxl_fb_find_or_create_single(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper; + int new_fb = 0; + int ret; + + if (!helper->fb) { + printk("%s:%d\n", __func__, __LINE__); + ret = qxlfb_create(qfbdev, sizes); + printk("%s:%d %d\n", __func__, __LINE__, ret); + if (ret) + return ret; + new_fb = 1; + } + return new_fb; +} + +static char *mode_option; +int qxl_parse_options(char *options) +{ + char *this_opt; + + if (!options || !*options) + return 0; + + while ((this_opt = strsep(&options, ",")) != NULL) { + if (!*this_opt) + continue; + mode_option = this_opt; + } + return 0; +} + +static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev) +{ + struct fb_info *info; + struct qxl_framebuffer *qfb = &qfbdev->qfb; + + if (qfbdev->helper.fbdev) { + info = qfbdev->helper.fbdev; + + unregister_framebuffer(info); + framebuffer_release(info); + } + if (qfb->obj) { + qxlfb_destroy_pinned_object(qfb->obj); + qfb->obj = NULL; + } + drm_fb_helper_fini(&qfbdev->helper); + drm_framebuffer_cleanup(&qfb->base); + + return 0; +} + +static struct drm_fb_helper_funcs qxl_fb_helper_funcs = { + // .gamma_set = qxl_crtc_fb_gamma_set, + // .gamma_get = qxl_crtc_fb_gamma_get, + .fb_probe = qxl_fb_find_or_create_single, +}; + +/* TODO: defio + * How does that work? can I get the changed data to send as opaques? + * Otherwise I need to invalidate an area - counter to qxl phylosophy. + */ + +int qxl_fbdev_init(struct qxl_device *qdev) +{ + struct qxl_fbdev *qfbdev; + int bpp_sel = 32; /* TODO: parameter from somewhere? 
*/ + int ret; + + qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL); + if (!qfbdev) + return -ENOMEM; + + qfbdev->qdev = qdev; + qdev->mode_info.qfbdev = qfbdev; + qfbdev->helper.funcs = &qxl_fb_helper_funcs; + + printk("%s: 1\n", __func__); + ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, + 1 /* num_crtc - QXL supports just 1 */, + QXLFB_CONN_LIMIT); + if (ret) { + kfree(qfbdev); + return ret; + } + + drm_fb_helper_single_add_all_connectors(&qfbdev->helper); + drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel); + return 0; +} + +void qxl_fbdev_fini(struct qxl_device *qdev) +{ + if (!qdev->mode_info.qfbdev) + return; + + qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev); + kfree(qdev->mode_info.qfbdev); + qdev->mode_info.qfbdev = NULL; +} + +void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) +{ + fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state); +} + +int qxl_fbdev_total_size(struct qxl_device *qdev) +{ + struct qxl_bo *robj; + int size = 0; + + robj = gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj); + size += qxl_bo_size(robj); + return size; +} + +bool qxl_fbdev_robj_is_fb(struct qxl_device *qdev, struct qxl_bo *robj) +{ + if (robj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj)) + return true; + return false; +} diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c new file mode 100644 index 00000000000..11d85f59aa5 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -0,0 +1,157 @@ +#include "drmP.h" +#include "drm.h" +#include "qxl_drm.h" +#include "qxl_drv.h" +#include "qxl_object.h" + +int qxl_gem_object_init(struct drm_gem_object *obj) +{ + /* we do nothings here */ + return 0; +} + +void qxl_gem_object_free(struct drm_gem_object *gobj) +{ + struct qxl_bo *qobj = gem_to_qxl_bo(gobj); + + if (qobj) { + // or unref? 
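+		/*
+		 * The "unref" alternative would be qxl_bo_unref() from
+		 * qxl_object.c, which just drops the TTM reference with
+		 * ttm_bo_unref() and lets qxl_ttm_bo_destroy() do the final
+		 * kfree() once the refcount reaches zero; qxl_bo_free()
+		 * itself is declared in qxl_object.h.
+		 */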
+ qxl_bo_free(qobj); + } +} + +int qxl_gem_object_create(struct qxl_device *qdev, int size, + int alignment, int initial_domain, + bool discardable, bool kernel, + struct drm_gem_object **obj) +{ + struct qxl_bo *qbo; + int r; + + *obj = NULL; + /* At least align on page size */ + if (alignment < PAGE_SIZE) { + alignment = PAGE_SIZE; + } + r = qxl_bo_create(qdev, size, kernel, initial_domain, &qbo); + if (r) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", + size, initial_domain, alignment, r); + return r; + } + *obj = &qbo->gem_base; + + mutex_lock(&qbo->qdev->gem.mutex); + list_add_tail(&qbo->list, &qdev->gem.objects); + mutex_unlock(&qbo->qdev->gem.mutex); + + return 0; +} + +int qxl_gem_object_create_with_handle(struct qxl_device *qdev, + struct drm_file *file_priv, + u32 domain, + size_t size, + struct qxl_bo **qobj, + uint32_t *handle) +{ + struct drm_gem_object *gobj; + int r; + + BUG_ON(!qobj); + BUG_ON(!handle); + + r = qxl_gem_object_create(qdev, size, 0, + domain, + false, false, + &gobj); + if (r) + return -ENOMEM; + r = drm_gem_handle_create(file_priv, gobj, handle); + if (r) { + return r; + } + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(gobj); + *qobj = gem_to_qxl_bo(gobj); + qxl_bo_kmap(*qobj, NULL); + return 0; +} + +int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, + uint64_t *gpu_addr) +{ + struct qxl_bo *qobj = obj->driver_private; + int r; + + r = qxl_bo_reserve(qobj, false); + if (unlikely(r != 0)) + return r; + r = qxl_bo_pin(qobj, pin_domain, gpu_addr); + qxl_bo_unreserve(qobj); + return r; +} + +void qxl_gem_object_unpin(struct drm_gem_object *obj) +{ + struct qxl_bo *qobj = obj->driver_private; + int r; + + r = qxl_bo_reserve(qobj, false); + if (likely(r == 0)) { + qxl_bo_unpin(qobj); + qxl_bo_unreserve(qobj); + } +} + +int qxl_gem_set_domain(struct drm_gem_object *gobj, + uint32_t rdomain, uint32_t wdomain) +{ + struct qxl_bo *qobj; + uint32_t domain; + int r; + + /* FIXME: reeimplement */ + qobj = gobj->driver_private; + /* work out where to validate the buffer to */ + domain = wdomain; + if (!domain) { + domain = rdomain; + } + if (!domain) { + /* Do nothings */ + printk(KERN_WARNING "Set domain withou domain !\n"); + return 0; + } + if (domain == QXL_GEM_DOMAIN_CPU) { + /* Asking for cpu access wait for object idle */ + r = qxl_bo_wait(qobj, NULL, false); + if (r) { + printk(KERN_ERR "Failed to wait for object !\n"); + return r; + } + } + return 0; +} + +int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) +{ + return 0; +} + +void qxl_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv) +{ +} + +int qxl_gem_init(struct qxl_device *qdev) +{ + INIT_LIST_HEAD(&qdev->gem.objects); + return 0; +} + +void qxl_gem_fini(struct qxl_device *qdev) +{ + qxl_bo_force_delete(qdev); +} diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c new file mode 100644 index 00000000000..258cf8a8636 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -0,0 +1,313 @@ +#include <linux/gfp.h> +#include <linux/slab.h> + +#include "qxl_drv.h" +#include "lookup3.h" + +struct image_info +{ + struct qxl_image *image; + int ref_count; + struct image_info *next; +}; + +#define HASH_SIZE 4096 +static struct image_info *image_table[HASH_SIZE]; + +static unsigned int +hash_and_copy(const uint8_t *src, int src_stride, + uint8_t *dest, int dest_stride, + int width, int height) +{ + int i, j; + unsigned int hash 
= 0; + + for (i = 0; i < height; ++i) { + const uint8_t *src_line = src + i * src_stride; + uint8_t *dest_line = dest + i * dest_stride; + + for (j = 0; j < width; ++j) { + uint32_t *s = (uint32_t *)src_line; + uint32_t *d = (uint32_t *)dest_line; + + if (dest) + d[j] = s[j]; + } + + hash = hashlittle(src_line, width * sizeof(uint32_t), hash); + } + + return hash; +} + +static struct image_info * +lookup_image_info(unsigned int hash, + int width, + int height) +{ + struct image_info *info = image_table[hash % HASH_SIZE]; + + while (info) { + struct qxl_image *image = info->image; + + if (image->descriptor.id == hash && + image->descriptor.width == width && + image->descriptor.height == height) { + return info; + } + + info = info->next; + } + +#if 0 + ErrorF ("lookup of %u failed\n", hash); +#endif + + return NULL; +} + +static struct image_info * +insert_image_info(unsigned int hash) +{ + struct image_info *info = kmalloc(sizeof(struct image_info), GFP_KERNEL); + + if (!info) + return NULL; + + info->next = image_table[hash % HASH_SIZE]; + image_table[hash % HASH_SIZE] = info; + + return info; +} + +#if 0 +static void +remove_image_info(struct image_info *info) +{ + struct image_info **location = &image_table[info->image->descriptor.id % HASH_SIZE]; + + while (*location && (*location) != info) + location = &((*location)->next); + + if (*location) + *location = info->next; + + kfree(info); +} +#endif + +#if 0 +void debug_fill(uint8_t *data, int width, int height) +{ + int xx, yy; + uint32_t *p; + static int count = 0; + + for (yy = 0, p = data ; yy < height; ++yy) { + for (xx = 0 ; xx < width ; ++xx) { + (*p++) = (((count + yy) & 0xff) << 24) | + (((count + yy) & 0xff) << 16) | + (((count + yy) & 0xff) << 8) | + ((count + yy) & 0xff); + } + } + ++count; +} +#endif + + +static struct qxl_image * +qxl_image_create_helper(struct qxl_device *qdev, + struct drm_qxl_release *release, + struct qxl_bo **image_bo, + const uint8_t *data, + int width, int height, + int depth, unsigned int hash, + int stride) +{ + struct qxl_image *image; + struct qxl_data_chunk *chunk; + struct image_info *info; + int i; + int chunk_stride; + int linesize = width * depth / 8; + struct qxl_bo *chunk_bo; + +#if 0 + ErrorF ("Must create new image of size %d %d\n", width, height); +#endif + /* Chunk */ + /* FIXME: Check integer overflow */ + /* TODO: variable number of chunks */ + chunk_stride = stride; // TODO: should use linesize, but it renders wrong (check the bitmaps are sent correctly first) + chunk = qxl_allocnf(qdev, sizeof(*chunk) + height * chunk_stride, release); + chunk_bo = release->bos[release->bo_count - 1]; + + chunk->data_size = height * chunk_stride; + chunk->prev_chunk = 0; + chunk->next_chunk = 0; + + qxl_io_log(qdev, "%s: stride %d, chunk_s %d, linesize %d; " + "chunk %p, chunk->data %p, data %p\n", + __func__, stride, chunk_stride, linesize, chunk, + chunk ? 
chunk->data : NULL, data); + // TODO: put back hashing + if (stride == linesize && chunk_stride == stride) + memcpy(chunk->data, data, linesize * height); + else + for (i = 0 ; i < height ; ++i) { + memcpy(chunk->data + i*chunk_stride, data + i*stride, linesize); + } + //memcpy_fromio(chunk->data, data, height * stride); + //hash_and_copy(data, stride, + // chunk->data, stride, + // width, height); + + /* Image */ + image = qxl_allocnf(qdev, sizeof(*image), release); + *image_bo = release->bos[release->bo_count - 1]; + + image->descriptor.id = 0; + image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP; + + image->descriptor.flags = 0; + image->descriptor.width = width; + image->descriptor.height = height; + + switch (depth) { + case 1: + image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE; // TODO: BE? check by arch? + break; + case 24: + image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT; + break; + case 32: + image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT; + break; + default: + DRM_ERROR("unsupported image bit depth\n"); + return NULL; // TODO: cleanup + } + image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; + image->u.bitmap.x = width; + image->u.bitmap.y = height; + image->u.bitmap.stride = chunk_stride; + image->u.bitmap.palette = 0; + image->u.bitmap.data = qxl_fb_physical_address(qdev, chunk_bo); + + /* Add to hash table */ + if (!hash) { + QXL_INFO(qdev, "%s: id %d has size %d %d\n", __FUNCTION__, + image->descriptor.id, width, height); + + return image; + } + if((info = insert_image_info(hash))) { + info->image = image; + info->ref_count = 1; + + image->descriptor.id = hash; + image->descriptor.flags = QXL_IMAGE_CACHE; + +#if 0 + ErrorF("added with hash %u\n", hash); +#endif + } + QXL_INFO(qdev, "%s: id %d has size %d %d\n", __FUNCTION__, + image->descriptor.id, width, height); + return image; +} + +struct qxl_image *qxl_image_create(struct qxl_device *qdev, + struct drm_qxl_release *release, + struct qxl_bo **image_bo, + const uint8_t *data, + int x, int y, int width, int height, + int depth, int stride) +{ + unsigned int hash; + struct image_info *info; + + data += y * stride + x * (depth / 8); + // TODO: TEMP + return qxl_image_create_helper(qdev, release, image_bo, data, + width, height, depth, 0, stride); + + hash = hash_and_copy(data, stride, NULL, -1, width, height); + + info = lookup_image_info(hash, width, height); + if (info) { + int i, j; + qxl_io_log(qdev, "%s: image found in cache\n", __func__); + +#if 0 + ErrorF ("reusing image %p with hash %u (%d x %d)\n", info->image, hash, width, height); +#endif + + info->ref_count++; + + for (i = 0; i < height; ++i) { + struct qxl_data_chunk *chunk; + const uint8_t *src_line = data + i * stride; + uint32_t *dest_line; + + chunk = qxl_fb_virtual_address(qdev, info->image->u.bitmap.data); + + dest_line = (uint32_t *)chunk->data + width * i; + + for (j = 0; j < width; ++j) { + uint32_t *s = (uint32_t *)src_line; + uint32_t *d = (uint32_t *)dest_line; + + if (d[j] != s[j]) + { +#if 0 + ErrorF ("bad collision at (%d, %d)! 
%d != %d\n", j, i, s[j], d[j]); +#endif + goto out; + } + } + } +out: + return info->image; + } else { + return qxl_image_create_helper(qdev, release, image_bo, data, + width, height, depth, hash, stride); + } +} + +void qxl_image_destroy(struct qxl_device *qdev, + struct qxl_image *image) +{ +#if 0 + struct qxl_data_chunk *chunk; + struct image_info *info; + + chunk = qxl_virtual_address(qdev,(void *)image->u.bitmap.data); + + info = lookup_image_info(image->descriptor.id, + image->descriptor.width, + image->descriptor.height); + + if(info && info->image == image) { + --info->ref_count; + + if(info->ref_count != 0) + return; + +#if 0 + ErrorF("removed %p from hash table\n", info->image); +#endif + + remove_image_info(info); + } + + qxl_free(qdev->mem, chunk); + qxl_free(qdev->mem, image); +#endif +} + +void qxl_drop_image_cache(struct qxl_device *qdev) +{ + memset(image_table, 0, HASH_SIZE * sizeof(struct image_info *)); +} diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c new file mode 100644 index 00000000000..76ca99347a9 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -0,0 +1,70 @@ +#include "qxl_drv.h" + +irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS) +{ + struct drm_device *dev = (struct drm_device *) arg; + struct qxl_device *qdev = (struct qxl_device *)dev->dev_private; + uint32_t pending; + + qdev->ram_header->int_mask = 0; + pending = xchg(&qdev->ram_header->int_pending, 0); + + atomic_inc(&qdev->irq_received); + + if (pending & QXL_INTERRUPT_DISPLAY) { + atomic_inc(&qdev->irq_received_display); + wake_up_all(&qdev->display_event); + } + if (pending & QXL_INTERRUPT_CURSOR) { + atomic_inc(&qdev->irq_received_cursor); + wake_up_all(&qdev->cursor_event); + } + if (pending & QXL_INTERRUPT_IO_CMD) { + atomic_inc(&qdev->irq_received_io_cmd); + wake_up_all(&qdev->io_cmd_event); + } + if (pending & QXL_INTERRUPT_ERROR) { + // TODO: log it, reset device (only way to exit this condition) + qdev->irq_received_error++; + qxl_io_log(qdev, "%s: driver is in bug mode.\n", __FUNCTION__); + } + if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) { + qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n"); + schedule_work(&qdev->client_monitors_config_work); + } + qxl_io_log(qdev, "%s: pending %x\n", __FUNCTION__, pending); + qdev->ram_header->int_mask = ~0; + outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ); + return IRQ_HANDLED; +} + +static void qxl_client_monitors_config_work_func(struct work_struct *work) +{ + struct qxl_device *qdev = container_of(work, struct qxl_device, + client_monitors_config_work); + + qxl_display_read_client_monitors_config(qdev); +} + +int qxl_irq_init(struct qxl_device *qdev) +{ + int ret; + + init_waitqueue_head(&qdev->display_event); + init_waitqueue_head(&qdev->cursor_event); + init_waitqueue_head(&qdev->io_cmd_event); + INIT_WORK(&qdev->client_monitors_config_work, + qxl_client_monitors_config_work_func); + atomic_set(&qdev->irq_received, 0); + atomic_set(&qdev->irq_received_display, 0); + atomic_set(&qdev->irq_received_cursor, 0); + atomic_set(&qdev->irq_received_io_cmd, 0); + qdev->irq_received_error = 0; + ret = drm_irq_install(qdev->ddev); + qdev->ram_header->int_mask = ~0; + if (unlikely(ret != 0)) { + DRM_ERROR("Failed installing irq: %d\n", ret); + return 1; + } + return 0; +} diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c new file mode 100644 index 00000000000..da4025dec41 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -0,0 +1,521 @@ +#include "qxl_drv.h" +#include "qxl_object.h" + +static void 
qxl_dump_mode(struct qxl_device *qdev, void *p) +{ + struct qxl_mode *m = p; + DRM_INFO("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n", + m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili, + m->y_mili, m->orientation); +} + +static bool qxl_check_device(struct qxl_device *qdev) +{ + struct qxl_rom *rom = qdev->rom; + int mode_offset; + int i; + + if (rom->magic != 0x4f525851) { + DRM_ERROR("bad rom signature %x\n", rom->magic); + return false; + } + + // TODO: I would really like to have a GIT_VERSION in there. google/kernelnewbies/dave? + //DRM_INFO("Kernel Git Version %s\n", GIT_REVISION); + DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id); + DRM_INFO("Compression level %d log level %d\n", rom->compression_level, + rom->log_level); + DRM_INFO("Currently using mode #%d, list at 0x%x\n", + rom->mode, rom->modes_offset); + DRM_INFO("%d io pages at offset 0x%x\n", + rom->num_io_pages, rom->pages_offset); + DRM_INFO("%d byte draw area at offset 0x%x\n", + rom->surface0_area_size, rom->draw_area_offset); + + qdev->vram_size = rom->surface0_area_size; + DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset); + + mode_offset = rom->modes_offset / 4; + qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset]; + DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset, qdev->mode_info.num_modes); + qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1); + for (i = 0; i < qdev->mode_info.num_modes; i++) + qxl_dump_mode(qdev, qdev->mode_info.modes + i); + return true; +} + +static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset, + unsigned long start_phys_addr, unsigned long end_phys_addr) +{ + uint64_t high_bits; + struct qxl_memslot *slot; + uint8_t slot_index; + struct qxl_ram_header *ram_header = qdev->ram_header; + + slot_index = qdev->rom->slots_start + slot_index_offset; + slot = &qdev->mem_slots[slot_index]; + slot->start_phys_addr = start_phys_addr; + slot->end_phys_addr = end_phys_addr; + ram_header->mem_slot.mem_start = slot->start_phys_addr; + ram_header->mem_slot.mem_end = slot->end_phys_addr; + qxl_io_memslot_add(qdev, slot_index); + slot->generation = qdev->rom->slot_generation; + high_bits = slot_index << qdev->slot_gen_bits; + high_bits |= slot->generation; + high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits)); + slot->high_bits = high_bits; + return slot_index; +} + +int qxl_device_init(struct qxl_device *qdev, + struct drm_device *ddev, + struct pci_dev *pdev, + unsigned long flags) +{ + int r; + + DRM_INFO("qxl: Initialising\n"); + + qdev->dev = &pdev->dev; + qdev->ddev = ddev; + qdev->pdev = pdev; + qdev->flags = flags; + + mutex_init(&qdev->gem.mutex); +// rwlock_init(&qdev->fence_drv.lock); + INIT_LIST_HEAD(&qdev->gem.objects); + + qdev->rom_base = pci_resource_start(pdev, 2); + qdev->rom_size = pci_resource_len(pdev, 2); + qdev->vram_base = pci_resource_start(pdev, 0); + qdev->surfaceram_base = pci_resource_start(pdev, 1); + qdev->surfaceram_size = pci_resource_len(pdev, 1); + qdev->io_base = pci_resource_start(pdev, 3); + + DRM_INFO("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n", + (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0), + (int)pci_resource_len(pdev, 0) / 1024 / 1024, + (int)pci_resource_len(pdev, 0) / 1024, + (void *)qdev->surfaceram_base, (void *)pci_resource_end(pdev, 1), + (int)qdev->surfaceram_size / 1024 / 1024, + (int)qdev->surfaceram_size / 1024); + + qdev->rom = ioremap(qdev->rom_base, qdev->rom_size); + if (!qdev->rom){ + printk(KERN_ERR "Unable 
to ioremap ROM\n"); + return -ENOMEM; + } + + qxl_check_device(qdev); + + r = qxl_bo_init(qdev); + if (r) { + DRM_ERROR("bo init failed %d\n", r); + return r; + } + + qdev->ram_header = ioremap(qdev->vram_base + + qdev->rom->ram_header_offset, + sizeof(*qdev->ram_header)); + + qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr), + sizeof(struct qxl_command), + QXL_COMMAND_RING_SIZE, + qdev->io_base + QXL_IO_NOTIFY_CMD); + + qdev->cursor_ring = qxl_ring_create(&(qdev->ram_header->cursor_ring_hdr), + sizeof(struct qxl_command), + QXL_CURSOR_RING_SIZE, + qdev->io_base + QXL_IO_NOTIFY_CMD); + + qdev->release_ring = qxl_ring_create(&(qdev->ram_header->release_ring_hdr), + sizeof(uint64_t), + QXL_RELEASE_RING_SIZE, 0); +#if 0 + r = qxl_fence_driver_init(qdev); + if (r) { + DRM_ERROR("fence init failed %d\n", r); + return r; + } +#endif + + /* TODO - slot initialization should happen on reset. where is our reset handler? */ + qdev->n_mem_slots = qdev->rom->slots_end; + qdev->slot_gen_bits = qdev->rom->slot_gen_bits; + qdev->slot_id_bits = qdev->rom->slot_id_bits; + qdev->va_slot_mask = (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits); + + qdev->mem_slots = kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot), GFP_KERNEL); + + idr_init(&qdev->release_idr); + mutex_init(&qdev->release_idr_mutex); + mutex_init(&qdev->async_io_mutex); + + /* reset the device into a known state - no memslots, no primary created, + * no surfaces. */ + qxl_io_reset(qdev); + qdev->ram_header->guest_features = + QXL_GUEST_FEATURE_CLIENT_MONITORS_CONFIG_INTERRUPT; + outb(qdev->io_base + QXL_IO_FEATURES_SET, 0); + + /* must initialize irq before first async io - slot creation */ + r = qxl_irq_init(qdev); + if (r) + return r; + + /* + * Note that virtual is surface0. We rely on the single ioremap done before. 
+ */ + qdev->main_mem_slot = setup_slot(qdev, 0, + (unsigned long)qdev->vram_base, + (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset); + qdev->surfaces_mem_slot = setup_slot(qdev, 1, + (unsigned long)qdev->surfaceram_base, + (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size); + DRM_INFO("main mem slot %d [%lx,%x)\n", + qdev->main_mem_slot, + (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset); + + r = qxl_fb_init(qdev); + if (r) + return r; + + return 0; +} + +void qxl_device_fini(struct qxl_device *qdev) +{ + qxl_ring_free(qdev->command_ring); + qxl_ring_free(qdev->release_ring); +// qxl_fence_driver_fini(qdev); + qxl_bo_fini(qdev); + iounmap(qdev->ram_header); + iounmap(qdev->rom); + qdev->rom = NULL; + qdev->mode_info.modes = NULL; + qdev->mode_info.num_modes = 0; +} + +int qxl_driver_unload(struct drm_device *dev) +{ + struct qxl_device *qdev = dev->dev_private; + + if (qdev == NULL) + return 0; + qxl_modeset_fini(qdev); + qxl_device_fini(qdev); + + kfree(qdev); + dev->dev_private = NULL; + return 0; +} + +int qxl_driver_load(struct drm_device *dev, unsigned long flags) +{ + struct qxl_device *qdev; + int r; + + // globals defined in qxl_debugfs.c + qxl_debug_level = 0; + qxl_debug_disable_fb = 0; + + /* require kms */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL); + if (qdev == NULL) + return -ENOMEM; + + dev->dev_private = qdev; + + r = qxl_device_init(qdev, dev, dev->pdev, flags); + if (r) + goto out; + + r = qxl_modeset_init(qdev); + if (r) { + qxl_driver_unload(dev); + goto out; + } + + return 0; +out: + kfree(qdev); + return r; +} + +// TODO: putting the ioctl handles here for now, but probably should +// be moved to an appropriate file. +// TODO 2: allocating a new gem(in qxl_bo) for each request. +// This is wasteful since bo's are page aligned. + +int qxl_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct qxl_device *qdev = dev->dev_private; + struct drm_qxl_alloc *qxl_alloc = data; + int ret; + // TODO: actually take note of the drm_qxl_alloc->type flag, except for the primary + // surface creation (i.e. use the surfaces bar) + + if (qxl_alloc->size == 0) { + DRM_ERROR("invalid size %d\n", qxl_alloc->size); + return -EINVAL; + } + switch (qxl_alloc->type) { + case QXL_ALLOC_TYPE_SURFACE_PRIMARY: + // TODO: would be nice if the primary always had a handle of 1, + // but for that we would need to change drm gem code a little, so probably + // not worth it. + // Note: The size is a actually ignored here, we return a handle to the + // primary surface BO + ret = qxl_get_handle_for_primary_fb(qdev, file_priv, + &qxl_alloc->handle); + if (ret) { + DRM_ERROR("%s: failed to allocate handle for primary" + "fb gem object %p\n", __func__, + qdev->fbdev_qfb->obj); + } + break; + case QXL_ALLOC_TYPE_SURFACE: + case QXL_ALLOC_TYPE_DATA: { + struct qxl_bo *qobj; + uint32_t handle; + u32 domain = qxl_alloc->type == QXL_ALLOC_TYPE_SURFACE ? 
+ QXL_GEM_DOMAIN_SURFACE : QXL_GEM_DOMAIN_VRAM; + ret = qxl_gem_object_create_with_handle(qdev, file_priv, + domain, + qxl_alloc->size, + &qobj, &handle); + if (ret) { + DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); + return -ENOMEM; + } + qxl_alloc->handle = handle; // TODO: change qxl_alloc to use 32 bit handles + break; + } + default: + DRM_ERROR("%s: unexpected alloc type %d\n", __func__, qxl_alloc->type); + return -EINVAL; + break; + } + return 0; +} + +int qxl_incref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_qxl_incref *incref = data; + struct drm_gem_object *gobj; + + gobj = drm_gem_object_lookup(dev, file_priv, incref->handle); /* this takes a reference */ + if (!gobj) { + DRM_ERROR("%s: invalid handle %u\n", __FUNCTION__, incref->handle); + return -EINVAL; + } + return 0; +} + +int qxl_decref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_qxl_decref *decref = data; + struct drm_gem_object *gobj; + + gobj = drm_gem_object_lookup(dev, file_priv, decref->handle); + if (!gobj) { + DRM_ERROR("%s: invalid handle %u\n", __FUNCTION__, decref->handle); + return -EINVAL; + } + drm_gem_object_unreference_unlocked(gobj); /* remove reference taken by lookup */ + drm_gem_object_unreference_unlocked(gobj); + return 0; +} + +int qxl_map_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct qxl_device *qdev = dev->dev_private; + struct drm_qxl_map *qxl_map = data; + + DRM_INFO("%s: handle in %d\n", __func__, qxl_map->handle); + return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle, &qxl_map->offset); +} + +int qxl_unmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_qxl_unmap *unmap = data; + + DRM_ERROR("%s: unimplemented\n", __FUNCTION__); + (void)unmap; + (void)dev; + (void)file_priv; + return 0; +} + +/* + * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's are on vram + * I believe, will see). + * *(src + src_off) = qxl_bo_physical_address(dst, dst_off) + */ +static void +apply_reloc(struct qxl_device *qdev, struct qxl_bo *src, uint64_t src_off, + struct qxl_bo *dst, uint64_t dst_off) +{ + *(uint64_t *)(src->kptr + src_off) = qxl_bo_physical_address(qdev, + dst, dst_off, qdev->main_mem_slot); +} + +struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, + struct drm_file *file_priv, uint64_t handle, + struct qxl_bo *handle_0_bo) +{ + struct drm_gem_object *gobj; + struct qxl_bo *qobj; + + if (handle == 0) { + return handle_0_bo; + } + gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); + if (!gobj) { + DRM_ERROR("bad bo handle %lld\n", handle); + return NULL; + } + qobj = gem_to_qxl_bo(gobj); + drm_gem_object_unreference_unlocked(gobj); + return qobj; +} + +/* + * Usage of execbuffer: + * Relocations need to take into account the full QXLDrawable size. 
+ * However, the command as passed from user space must *not* contain the initial + * QXLReleaseInfo struct (first XXX bytes) + */ +int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct qxl_device *qdev = dev->dev_private; + struct drm_qxl_execbuffer *execbuffer = data; + struct drm_qxl_command user_cmd; + int cmd_num; + struct qxl_bo *reloc_src_bo; + struct qxl_bo *reloc_dst_bo; + struct drm_qxl_reloc reloc; + void *fb_cmd; + int i; + + for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { + struct drm_qxl_release *release; + struct qxl_bo *cmd_bo; + int release_type; + int is_cursor; + struct drm_qxl_command *commands = + (struct drm_qxl_command *)execbuffer->commands; + + if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], + sizeof(user_cmd))) + return -EFAULT; + switch (user_cmd.type) { + case QXL_CMD_DRAW: + release_type = QXL_RELEASE_DRAWABLE; + is_cursor = 0; + break; + case QXL_CMD_SURFACE: + release_type = QXL_RELEASE_SURFACE_CMD; + is_cursor = 0; + break; + case QXL_CMD_CURSOR: + release_type = QXL_RELEASE_CURSOR_CMD; + is_cursor = 1; + break; + default: + qxl_io_log(qdev, "%s: bad command %d not in {%d, %d, %d}\n", + __FUNCTION__, user_cmd.type, + QXL_CMD_DRAW, QXL_CMD_SURFACE, QXL_CMD_CURSOR); + return -EFAULT; + } + fb_cmd = qxl_alloc_releasable(qdev, + sizeof(union qxl_release_info) + + user_cmd.command_size, + release_type, + &release, + &cmd_bo); + if (DRM_COPY_FROM_USER(fb_cmd + sizeof(union qxl_release_info), + (void *)user_cmd.command, + user_cmd.command_size)) + return -EFAULT; + qxl_io_log(qdev, "%s: type %d, size %d, #relocs %d\n", __func__, user_cmd.type, + user_cmd.command_size, user_cmd.relocs_num); + for (i = 0 ; i < user_cmd.relocs_num; ++i) { + if (DRM_COPY_FROM_USER(&reloc, + &((struct drm_qxl_reloc *)user_cmd.relocs)[i], + sizeof(reloc))) + return -EFAULT; + qxl_io_log(qdev, "%s: r#%d: %d+%d->%d+%d\n", __func__, i, reloc.src_handle, + reloc.src_offset, reloc.dst_handle, reloc.dst_offset); + reloc_src_bo = qxlhw_handle_to_bo(qdev, file_priv, reloc.src_handle, cmd_bo); + reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, reloc.dst_handle, cmd_bo); + if (!reloc_src_bo || !reloc_dst_bo) { + return -EINVAL; + } + apply_reloc(qdev, reloc_src_bo, reloc.src_offset, + reloc_dst_bo, reloc.dst_offset); + } + // TODO: multiple commands in a single push (introduce new QXLCommandBunch ?) + if (is_cursor) { + qxl_push_cursor_ring(qdev, cmd_bo, user_cmd.type); + } else { + qxl_push_command_ring(qdev, cmd_bo, user_cmd.type); + } + } + return 0; +} + +// TODO: this should be defined in ram or rom +#define NUM_SURFACES 1024 + +int qxl_update_area_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct qxl_device *qdev = dev->dev_private; + struct drm_qxl_update_area* update_area = data; + struct qxl_rect area = {.left = update_area->left, + .top = update_area->top, + .right = update_area->right, + .bottom = update_area->bottom}; + + if (update_area->surface_id > NUM_SURFACES) { + return -EINVAL; + } + if (update_area->left >= update_area->right || + update_area->top >= update_area->bottom) { + return -EINVAL; + } + + qxl_io_update_area(qdev, update_area->surface_id, &area); + return 0; +} + +struct drm_ioctl_desc qxl_ioctls[] = { + // TODO: what flags go for each of these? DRM_AUTH, ok, UNLOCKED? 
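+	/* DRM_AUTH limits these ioctls to clients authenticated against the
+	 * DRM master; DRM_UNLOCKED means the handlers run without the global
+	 * drm_global_mutex, so each handler is responsible for its own
+	 * locking. */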
+ DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(QXL_INCREF, qxl_incref_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(QXL_DECREF, qxl_decref_ioctl, DRM_AUTH|DRM_UNLOCKED), + + /* NB: those two don't actually do what the name says. + * This is the same for the DUMB_MAP/DUMB_UNMAP calls. What they + * actually do is provide the caller (userspace) with an offset + * to give to the mmap system call, which ends up in qxl_mmap, + * which calls ttm_bo_mmap, which actually mmaps. */ + DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(QXL_UNMAP, qxl_unmap_ioctl, DRM_AUTH|DRM_UNLOCKED), + + DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH|DRM_UNLOCKED), + + DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH|DRM_UNLOCKED), +}; + +int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls); diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c new file mode 100644 index 00000000000..44f442f16f6 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -0,0 +1,225 @@ +#include "qxl_drv.h" +#include "qxl_drm.h" +#include "qxl_object.h" + +static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) +{ + struct qxl_bo *bo; + + //DRM_INFO("%s tbo %p\n", __func__, tbo); + bo = container_of(tbo, struct qxl_bo, tbo); + mutex_lock(&bo->qdev->gem.mutex); + list_del_init(&bo->list); + mutex_unlock(&bo->qdev->gem.mutex); + kfree(bo); +} + +bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) +{ + if (bo->destroy == &qxl_ttm_bo_destroy) + return true; + return false; +} + +void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) +{ + u32 c = 0; + + qbo->placement.fpfn = 0; + qbo->placement.lpfn = 0; + qbo->placement.placement = qbo->placements; + qbo->placement.busy_placement = qbo->placements; + if (domain & QXL_GEM_DOMAIN_VRAM) + qbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_VRAM; + if (domain & QXL_GEM_DOMAIN_SURFACE) + qbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_PRIV0; + if (domain & QXL_GEM_DOMAIN_CPU) + qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + if (!c) + qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + qbo->placement.num_placement = c; + qbo->placement.num_busy_placement = c; +} + + +int qxl_bo_create(struct qxl_device *qdev, + unsigned long size, bool kernel, u32 domain, + struct qxl_bo **bo_ptr) +{ + struct qxl_bo *bo; + enum ttm_bo_type type; + int r; + static int max_size = 0; + + if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) { + qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; + } + if (kernel) { + type = ttm_bo_type_kernel; + } else { + type = ttm_bo_type_device; + } + *bo_ptr = NULL; + bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL); + if (bo == NULL) + return -ENOMEM; + if ((size & (PAGE_SIZE - 1)) != 0) { + if (size > PAGE_SIZE) { + size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); + } else { + // Ok, this is not a GEM object, and it's too damn + // small, so just allocate it. + printk_once("small object allocator not implemented yet - wastage!!! 
%ld -> %ld\n", + size, PAGE_SIZE); + size = PAGE_SIZE; + } + } + if (size > max_size) { + max_size = size; + DRM_INFO("%s: max_size = %d\n", __FUNCTION__, max_size); + } + r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size); + if (unlikely(r)) { + kfree(bo); + return r; + } + bo->gem_base.driver_private = NULL; + bo->qdev = qdev; + bo->surface_reg = -1; + INIT_LIST_HEAD(&bo->list); + + qxl_ttm_placement_from_domain(bo, domain); + /* Kernel allocation are uninterruptible */ + // TODO radeon has a vram_mutex here + r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, + &bo->placement, 0, 0, !kernel, NULL, size, + NULL, &qxl_ttm_bo_destroy); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + dev_err(qdev->dev, + "object_init failed for (%lu, 0x%08X)\n", + size, domain); + return r; + } + *bo_ptr = bo; + return 0; +} + +int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) +{ + bool is_iomem; + int r; + + if (bo->kptr) { + if (ptr) { + *ptr = bo->kptr; + } + return 0; + } + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); + if (r) { + return r; + } + bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); + if (ptr) { + *ptr = bo->kptr; + } + return 0; +} + +void qxl_bo_kunmap(struct qxl_bo *bo) +{ + if (bo->kptr == NULL) + return; + bo->kptr = NULL; + ttm_bo_kunmap(&bo->kmap); +} + +void qxl_bo_unref(struct qxl_bo **bo) +{ + struct ttm_buffer_object *tbo; + + if ((*bo) == NULL) + return; + tbo = &((*bo)->tbo); + ttm_bo_unref(&tbo); + if (tbo == NULL) + *bo = NULL; +} + +int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) +{ + int r, i; + + if (bo->pin_count) { + bo->pin_count++; + if (gpu_addr) + *gpu_addr = qxl_bo_gpu_offset(bo); + return 0; + } + qxl_ttm_placement_from_domain(bo, domain); + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); + if (likely(r == 0)) { + bo->pin_count = 1; + if (gpu_addr != NULL) + *gpu_addr = qxl_bo_gpu_offset(bo); + } + if (unlikely(r != 0)) + dev_err(bo->qdev->dev, "%p pin failed\n", bo); + return r; +} + +int qxl_bo_unpin(struct qxl_bo *bo) +{ + int r, i; + + if (!bo->pin_count) { + dev_warn(bo->qdev->dev, "%p unpin not necessary\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); + if (unlikely(r != 0)) + dev_err(bo->qdev->dev, "%p validate failed for unpin\n", bo); + return r; +} + +void qxl_bo_force_delete(struct qxl_device *qdev) +{ + struct qxl_bo *bo, *n; + + if (list_empty(&qdev->gem.objects)) { + return; + } + dev_err(qdev->dev, "Userspace still has active objects !\n"); + list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { + mutex_lock(&qdev->ddev->struct_mutex); + dev_err(qdev->dev, "%p %p %lu %lu force free\n", + &bo->gem_base, bo, (unsigned long)bo->gem_base.size, + *((unsigned long *)&bo->gem_base.refcount)); + mutex_lock(&bo->qdev->gem.mutex); + list_del_init(&bo->list); + mutex_unlock(&bo->qdev->gem.mutex); + /* this should unref the ttm bo */ + drm_gem_object_unreference(&bo->gem_base); + mutex_unlock(&qdev->ddev->struct_mutex); + } +} + +int qxl_bo_init(struct qxl_device *qdev) +{ + return qxl_ttm_init(qdev); +} + +void qxl_bo_fini(struct qxl_device *qdev) +{ + qxl_ttm_fini(qdev); +} diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h new file mode 100644 index 
00000000000..33726f0ff32 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -0,0 +1,77 @@ +#ifndef QXL_OBJECT_H +#define QXL_OBJECT_H + +#include "qxl_drv.h" + +static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait) +{ + int r; + + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + dev_err(bo->qdev->dev, "%p reserve failed\n", bo); + return r; + } + return 0; +} + +static inline void qxl_bo_unreserve(struct qxl_bo *bo) +{ + ttm_bo_unreserve(&bo->tbo); +} + +static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo) +{ + return bo->tbo.offset; +} + +static inline unsigned long qxl_bo_size(struct qxl_bo *bo) +{ + return bo->tbo.num_pages << PAGE_SHIFT; +} + +static inline bool qxl_bo_is_reserved(struct qxl_bo *bo) +{ + return !!atomic_read(&bo->tbo.reserved); +} + +static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) +{ + return bo->tbo.addr_space_offset; +} + +static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, + bool no_wait) +{ + int r; + + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + dev_err(bo->qdev->dev, "%p reserve failed for wait\n", bo); + return r; + } + spin_lock(&bo->tbo.bdev->fence_lock); + if (mem_type) + *mem_type = bo->tbo.mem.mem_type; + if (bo->tbo.sync_obj) + r = ttm_bo_wait(&bo->tbo, true, true, no_wait); + spin_unlock(&bo->tbo.bdev->fence_lock); + ttm_bo_unreserve(&bo->tbo); + return r; +} + +extern int qxl_bo_create(struct qxl_device *qdev, + unsigned long size, + bool kernel, u32 domain, + struct qxl_bo **bo_ptr); +extern void qxl_bo_free(struct qxl_bo *bo); +extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); +extern void qxl_bo_kunmap(struct qxl_bo *bo); +extern void qxl_bo_unref(struct qxl_bo **bo); +extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); +extern int qxl_bo_unpin(struct qxl_bo *bo); +extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); +extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); +#endif // QXL_OBJECT_H diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c new file mode 100644 index 00000000000..784fa38e13a --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -0,0 +1,449 @@ + +#include <ttm/ttm_bo_api.h> +#include <ttm/ttm_bo_driver.h> +#include <ttm/ttm_placement.h> +#include <ttm/ttm_module.h> +#include <drm/drmP.h> +#include <drm/drm.h> +#include <drm/qxl_drm.h> +#include "qxl_drv.h" +#include "qxl_object.h" + +static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) +{ + struct qxl_mman *mman; + struct qxl_device *qdev; + + mman = container_of(bdev, struct qxl_mman, bdev); + qdev = container_of(mman, struct qxl_device, mman); + return qdev; +} + +static int qxl_ttm_mem_global_init(struct drm_global_reference *ref) +{ + return ttm_mem_global_init(ref->object); +} + +static void qxl_ttm_mem_global_release(struct drm_global_reference *ref) +{ + ttm_mem_global_release(ref->object); +} + +static int qxl_ttm_global_init(struct qxl_device *qdev) +{ + struct drm_global_reference *global_ref; + int r; + + qdev->mman.mem_global_referenced = false; + global_ref = &qdev->mman.mem_global_ref; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &qxl_ttm_mem_global_init; + global_ref->release = &qxl_ttm_mem_global_release; + + r = drm_global_item_ref(global_ref); + if (r != 0) { + DRM_ERROR("Failed setting up TTM memory accounting " + "subsystem.\n"); + return 
r;
+	}
+
+	qdev->mman.bo_global_ref.mem_glob =
+		qdev->mman.mem_global_ref.object;
+	global_ref = &qdev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&qdev->mman.mem_global_ref);
+		return r;
+	}
+
+	qdev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void qxl_ttm_global_fini(struct qxl_device *qdev)
+{
+	if (qdev->mman.mem_global_referenced) {
+		drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&qdev->mman.mem_global_ref);
+		qdev->mman.mem_global_referenced = false;
+	}
+}
+
+static struct vm_operations_struct qxl_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	struct qxl_device *qdev;
+	int r;
+
+	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	if (bo == NULL) {
+		return VM_FAULT_NOPAGE;
+	}
+	qdev = qxl_get_qdev(bo->bdev);
+	//mutex_lock(&qdev->vram_mutex);
+	r = ttm_vm_ops->fault(vma, vmf);
+	//mutex_unlock(&qdev->vram_mutex);
+	return r;
+}
+
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct qxl_device *qdev;
+	int r;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+		printk("%s: vma->vm_pgoff (%lu) < DRM_FILE_PAGE_OFFSET\n",
+		       __func__, vma->vm_pgoff);
+		return drm_mmap(filp, vma);
+	}
+
+	file_priv = filp->private_data;
+	qdev = file_priv->minor->dev->dev_private;
+	if (qdev == NULL) {
+		DRM_ERROR("filp->private_data->minor->dev->dev_private == NULL\n");
+		return -EINVAL;
+	}
+	QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lu\n",
+		 __func__, filp->private_data, vma->vm_pgoff);
+
+	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		qxl_ttm_vm_ops = *ttm_vm_ops;
+		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+	}
+	vma->vm_ops = &qxl_ttm_vm_ops;
+	return 0;
+}
+
+static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+static int qxl_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	return ttm_bo_manager_func.init(man, p_size);
+}
+
+static int qxl_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	return ttm_bo_manager_func.takedown(man);
+}
+
+static int qxl_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct qxl_device *qdev = container_of(man->bdev, struct qxl_device,
+					       mman.bdev);
+	int ret;
+	int res = 0;
+
+	ret = ttm_bo_manager_func.get_node(man, bo, placement, mem);
+	while (unlikely(ret || mem->mm_node == NULL)) {
+		qxl_io_notify_oom(qdev);
+		res += qxl_garbage_collect(qdev);
+		/* TODO: sleep here */
+		ret = ttm_bo_manager_func.get_node(man, bo, placement, mem);
+	}
+	if (unlikely(res > 0)) {
+		QXL_INFO(qdev, "%s: released %d\n", __func__, res);
+	}
+	return ret;
+}
+
+static void qxl_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	ttm_bo_manager_func.put_node(man, mem);
+}
+
+static void qxl_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	ttm_bo_manager_func.debug(man, prefix);
+}
+
+static const
struct ttm_mem_type_manager_func qxl_bo_manager_func = { + qxl_bo_man_init, + qxl_bo_man_takedown, + qxl_bo_man_get_node, + qxl_bo_man_put_node, + qxl_bo_man_debug, +}; + +static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + struct qxl_device *qdev; + + qdev = qxl_get_qdev(bdev); + + switch (type) { + case TTM_PL_SYSTEM: + /* System memory */ + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + case TTM_PL_PRIV0: + /* "On-card" video ram */ + man->func = &qxl_bo_manager_func; + man->gpu_offset = 0; + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +static void qxl_evict_flags(struct ttm_buffer_object *bo, + struct ttm_placement *placement) +{ + struct qxl_bo *qbo; + static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + + if (!qxl_ttm_bo_is_qxl_bo(bo)) { + placement->fpfn = 0; + placement->lpfn = 0; + placement->placement = &placements; + placement->busy_placement = &placements; + placement->num_placement = 1; + placement->num_busy_placement = 1; + return; + } + qbo = container_of(bo, struct qxl_bo, tbo); + qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); + *placement = qbo->placement; +} + +static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) +{ + return 0; +} + +#if 0 +static int qxl_sync_obj_wait(void *sync_obj, void *sync_arg, + bool lazy, bool interruptible) +{ + return qxl_fence_wait((struct qxl_fence *)sync_obj, interruptible); +} + +static int qxl_sync_obj_flush(void *sync_obj, void *sync_arg) +{ + return 0; +} + +static void qxl_sync_obj_unref(void **sync_obj) +{ + qxl_fence_unref((struct qxl_fence **)sync_obj); +} + +static void *qxl_sync_obj_ref(void *sync_obj) +{ + return qxl_fence_ref((struct qxl_fence *)sync_obj); +} + +static bool qxl_sync_obj_signaled(void *sync_obj, void *sync_arg) +{ + return qxl_fence_signaled((struct qxl_fence *)sync_obj); +} +#endif + +static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct qxl_device *qdev = qxl_get_qdev(bdev); + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* system memory */ + return 0; + case TTM_PL_VRAM: + mem->bus.is_iomem = true; + mem->bus.base = qdev->vram_base; + mem->bus.offset = mem->start << PAGE_SHIFT; + break; + case TTM_PL_PRIV0: + qxl_io_log(qdev, "PRIV0 reservation\n"); + mem->bus.is_iomem = true; + mem->bus.base = qdev->surfaceram_base; + mem->bus.offset = mem->start << PAGE_SHIFT; + break; + default: + return -EINVAL; + } + return 0; +} + +static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ +} + +/* + * TTM backend functions. 
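+ * QXL has no GTT-style aperture to bind system pages into: only the VRAM
+ * and PRIV0 (surface) heaps are registered in qxl_init_mem_type() above,
+ * so the bind/unbind callbacks below are unimplemented stubs and only the
+ * TT allocation and teardown paths are actually exercised.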
+ */
+struct qxl_ttm_tt {
+	struct ttm_dma_tt	ttm;
+	struct qxl_device	*qdev;
+	u64			offset;
+};
+
+static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+				struct ttm_mem_reg *bo_mem)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	/* Not implemented */
+	return -1;
+}
+
+static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	/* Not implemented */
+	return -1;
+}
+
+static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func qxl_backend_func = {
+	.bind = &qxl_ttm_backend_bind,
+	.unbind = &qxl_ttm_backend_unbind,
+	.destroy = &qxl_ttm_backend_destroy,
+};
+
+struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct qxl_device *qdev;
+	struct qxl_ttm_tt *gtt;
+
+	qdev = qxl_get_qdev(bdev);
+	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL) {
+		return NULL;
+	}
+	gtt->ttm.ttm.func = &qxl_backend_func;
+	gtt->qdev = qdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static struct ttm_bo_driver qxl_bo_driver = {
+	.ttm_tt_create = &qxl_ttm_tt_create,
+	.invalidate_caches = &qxl_invalidate_caches,
+	.init_mem_type = &qxl_init_mem_type,
+	.evict_flags = &qxl_evict_flags,
+/*	.move = &qxl_bo_move, */
+	.verify_access = &qxl_verify_access,
+#if 0
+	.sync_obj_signaled = &qxl_sync_obj_signaled,
+	.sync_obj_wait = &qxl_sync_obj_wait,
+	.sync_obj_flush = &qxl_sync_obj_flush,
+	.sync_obj_unref = &qxl_sync_obj_unref,
+	.sync_obj_ref = &qxl_sync_obj_ref,
+	.move_notify = &qxl_bo_move_notify,
+	.fault_reserve_notify = &qxl_bo_fault_reserve_notify,
+#endif
+	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
+	.io_mem_free = &qxl_ttm_io_mem_free,
+};
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+	int r;
+	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+	r = qxl_ttm_global_init(qdev);
+	if (r) {
+		return r;
+	}
+	/* No other users of the address space, so set it to 0 */
+	r = ttm_bo_device_init(&qdev->mman.bdev,
+			       qdev->mman.bo_global_ref.ref.object,
+			       &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	printk("%s: qdev->mman.bdev.glob == %p\n", __func__,
+	       qdev->mman.bdev.glob);
+	/* NOTE: this includes the framebuffer (aka surface 0) */
+	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
+			   num_io_pages);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+			   qdev->surfaceram_size / PAGE_SIZE);
+	if (r) {
+		DRM_ERROR("Failed initializing Surfaces heap.\n");
+		return r;
+	}
+	DRM_INFO("qxl: %uM of VRAM memory size\n",
+		 (unsigned)qdev->vram_size / (1024 * 1024));
+	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) {
+		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+	}
+	return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	
ttm_bo_device_release(&qdev->mman.bdev);
+	qxl_ttm_global_fini(qdev);
+	DRM_INFO("qxl: ttm finalized\n");
+}
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index c78bb997e2c..aff723986ef 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -721,3 +721,8 @@
 #define ffb_PCI_IDS \
 	{0, 0, 0}
+
+#define qxl_PCI_IDS \
+	{0x1b36, 0x0100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+	{0x1b36, 0x0100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8, 0xffff00, 0}, \
+	{0, 0, 0}
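For illustration only, a minimal userspace sketch of driving the new QXL_UPDATE_AREA
ioctl follows. It relies solely on the fields qxl_update_area_ioctl() above dereferences
(surface_id, left, top, right, bottom) and assumes qxl_drm.h exposes struct
drm_qxl_update_area and a DRM_IOCTL_QXL_UPDATE_AREA number in the usual DRM way;
treat the exact names as illustrative rather than authoritative UAPI.

#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "qxl_drm.h"	/* assumed to provide struct drm_qxl_update_area + ioctl nr */

/* Ask the device to process a dirty rectangle of the given surface.
 * The kernel side rejects empty rectangles and out-of-range surface ids
 * with -EINVAL, so a failure here usually means a bad rectangle. */
static int qxl_flush_rect(int drm_fd, unsigned int surface_id,
			  unsigned int left, unsigned int top,
			  unsigned int right, unsigned int bottom)
{
	struct drm_qxl_update_area ua;

	memset(&ua, 0, sizeof(ua));
	ua.surface_id = surface_id;
	ua.left = left;		/* must be < right */
	ua.top = top;		/* must be < bottom */
	ua.right = right;
	ua.bottom = bottom;

	if (ioctl(drm_fd, DRM_IOCTL_QXL_UPDATE_AREA, &ua) < 0) {
		perror("DRM_IOCTL_QXL_UPDATE_AREA");
		return -1;
	}
	return 0;
}

The QXL_MAP/QXL_UNMAP ioctls mentioned in the NB comment above follow the same
pattern as the dumb map calls: the ioctl only hands back an offset, which userspace
then passes to mmap(2) on the DRM file descriptor to obtain a CPU mapping via
qxl_mmap()/ttm_bo_mmap().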