author	Alp Toker <alp@atoker.com>	2007-01-07 02:03:30 +0000
committer	Behdad Esfahbod <behdad@behdad.org>	2007-01-07 01:35:15 -0500
commit	caa3c2e1e7901031488f5dae243a755a4e4daec9 (patch)
tree	fc87f147cb04ad5b82b74eda98c3bd2ced25a89c
parent	c96a71e709e537f690da6d4a184aa4c64fe11028 (diff)
Fix various code/comment typos
-rw-r--r--	pixman/src/pixregion.c	2
-rw-r--r--	src/cairo-matrix.c	2
-rw-r--r--	src/cairo-pdf-surface.c	20
-rw-r--r--	src/cairo-win32-surface.c	2
-rw-r--r--	src/cairo-xcb-surface.c	4
-rw-r--r--	src/cairo-xlib-surface.c	10
6 files changed, 20 insertions, 20 deletions
diff --git a/pixman/src/pixregion.c b/pixman/src/pixregion.c
index 5112157ca..0404dff44 100644
--- a/pixman/src/pixregion.c
+++ b/pixman/src/pixregion.c
@@ -1421,7 +1421,7 @@ QuickSortRects(
* Step 2. Split the rectangles into the minimum number of proper y-x
* banded regions. This may require horizontally merging
* rectangles, and vertically coalescing bands. With any luck,
- * this step in an identity tranformation (ala the Box widget),
+ * this step in an identity transformation (ala the Box widget),
* or a coalescing into 1 box (ala Menus).
*
* Step 3. Merge the separate regions down to a single region by calling
diff --git a/src/cairo-matrix.c b/src/cairo-matrix.c
index d4f4bf776..b689f2aaf 100644
--- a/src/cairo-matrix.c
+++ b/src/cairo-matrix.c
@@ -102,7 +102,7 @@ slim_hidden_def(cairo_matrix_init);
* @x0: location to store x0 (X-translation component) of matrix, or %NULL
* @y0: location to store y0 (Y-translation component) of matrix, or %NULL
*
- * Gets the matrix values for the affine tranformation that @matrix represents.
+ * Gets the matrix values for the affine transformation that @matrix represents.
* See cairo_matrix_init().
*
*
diff --git a/src/cairo-pdf-surface.c b/src/cairo-pdf-surface.c
index a59cd99c6..2778d345a 100644
--- a/src/cairo-pdf-surface.c
+++ b/src/cairo-pdf-surface.c
@@ -626,7 +626,7 @@ compress_dup (const void *data, unsigned long data_size,
}
/* Emit alpha channel from the image into the given data, providing
- * and id that can be used to reference the resulting SMask object.
+ * an id that can be used to reference the resulting SMask object.
*
* In the case that the alpha channel happens to be all opaque, then
* no SMask object will be emitted and *id_ret will be set to 0.
@@ -1006,7 +1006,7 @@ emit_linear_colorgradient (cairo_pdf_surface_t *surface,
}
static cairo_pdf_resource_t
-emit_stiched_colorgradient (cairo_pdf_surface_t *surface,
+emit_stitched_colorgradient (cairo_pdf_surface_t *surface,
unsigned int n_stops,
cairo_pdf_color_stop_t stops[])
{
@@ -1020,7 +1020,7 @@ emit_stiched_colorgradient (cairo_pdf_surface_t *surface,
&stops[i+1]);
}
- /* ... and stich them together */
+ /* ... and stitch them together */
function = _cairo_pdf_surface_new_object (surface);
_cairo_output_stream_printf (surface->output,
"%d 0 obj\r\n"
@@ -1065,7 +1065,7 @@ emit_stiched_colorgradient (cairo_pdf_surface_t *surface,
return function;
}
-#define COLOR_STOP_EPSILLON 1e-6
+#define COLOR_STOP_EPSILON 1e-6
static cairo_pdf_resource_t
emit_pattern_stops (cairo_pdf_surface_t *surface, cairo_gradient_pattern_t *pattern)
@@ -1095,13 +1095,13 @@ emit_pattern_stops (cairo_pdf_surface_t *surface, cairo_gradient_pattern_t *patt
/* make sure first offset is 0.0 and last offset is 1.0. (Otherwise Acrobat
* Reader chokes.) */
- if (stops[0].offset > COLOR_STOP_EPSILLON) {
+ if (stops[0].offset > COLOR_STOP_EPSILON) {
memcpy (allstops, stops, sizeof (cairo_pdf_color_stop_t));
stops = allstops;
stops[0].offset = 0.0;
n_stops++;
}
- if (stops[n_stops-1].offset < 1.0 - COLOR_STOP_EPSILLON) {
+ if (stops[n_stops-1].offset < 1.0 - COLOR_STOP_EPSILON) {
memcpy (&stops[n_stops],
&stops[n_stops - 1],
sizeof (cairo_pdf_color_stop_t));
@@ -1110,12 +1110,12 @@ emit_pattern_stops (cairo_pdf_surface_t *surface, cairo_gradient_pattern_t *patt
}
if (n_stops == 2) {
- /* no need for stiched function */
+ /* no need for stitched function */
function = emit_linear_colorgradient (surface, &stops[0], &stops[1]);
} else {
- /* multiple stops: stich. XXX possible optimization: regulary spaced
- * stops do not require stiching. XXX */
- function = emit_stiched_colorgradient (surface,
+ /* multiple stops: stitch. XXX possible optimization: regulary spaced
+ * stops do not require stitching. XXX */
+ function = emit_stitched_colorgradient (surface,
n_stops,
stops);
}
diff --git a/src/cairo-win32-surface.c b/src/cairo-win32-surface.c
index 120849d47..2c2a5fdf2 100644
--- a/src/cairo-win32-surface.c
+++ b/src/cairo-win32-surface.c
@@ -1828,7 +1828,7 @@ cairo_win32_surface_get_dc (cairo_surface_t *surface)
}
/**
- * cario_win32_surface_get_image
+ * cairo_win32_surface_get_image
* @surface: a #cairo_surface_t
*
* Returns a #cairo_surface_t image surface that refers to the same bits
diff --git a/src/cairo-xcb-surface.c b/src/cairo-xcb-surface.c
index 8b8ba1d5b..2fdf8a1d5 100644
--- a/src/cairo-xcb-surface.c
+++ b/src/cairo-xcb-surface.c
@@ -926,7 +926,7 @@ _operator_needs_alpha_composite (cairo_operator_t op,
/* There is a bug in most older X servers with compositing using a
* untransformed repeating source pattern when the source is in off-screen
* video memory, and another with repeated transformed images using a
- * general tranform matrix. When these bugs could be triggered, we need a
+ * general transform matrix. When these bugs could be triggered, we need a
* fallback: in the common case where we have no transformation and the
* source and destination have the same format/visual, we can do the
* operation using the core protocol for the first bug, otherwise, we need
@@ -2020,7 +2020,7 @@ _cairo_xcb_surface_add_glyph (xcb_connection_t *dpy,
*
* This is a postscript-y model, where each glyph has its own
* coordinate space, so it's what we expose in terms of metrics. It's
- * apparantly what everyone's expecting. Everyone except the Render
+ * apparently what everyone's expecting. Everyone except the Render
* extension. Render wants to see a glyph tile starting at (0,0), with
* an origin offset inside, like this:
*
diff --git a/src/cairo-xlib-surface.c b/src/cairo-xlib-surface.c
index fbfae75de..6a0d3e441 100644
--- a/src/cairo-xlib-surface.c
+++ b/src/cairo-xlib-surface.c
@@ -1106,7 +1106,7 @@ _operator_needs_alpha_composite (cairo_operator_t op,
/* There is a bug in most older X servers with compositing using a
* untransformed repeating source pattern when the source is in off-screen
* video memory, and another with repeated transformed images using a
- * general tranform matrix. When these bugs could be triggered, we need a
+ * general transform matrix. When these bugs could be triggered, we need a
* fallback: in the common case where we have no transformation and the
* source and destination have the same format/visual, we can do the
* operation using the core protocol for the first bug, otherwise, we need
@@ -1166,7 +1166,7 @@ _categorize_composite_operation (cairo_xlib_surface_t *dst,
/* If these are on the same screen but otherwise incompatible,
* make a copy as core drawing can't cross depths and doesn't
- * work rightacross visuals of the same depth
+ * work right across visuals of the same depth
*/
if (_cairo_xlib_surface_same_screen (dst, src) &&
!_surfaces_compatible (dst, src))
@@ -2390,7 +2390,7 @@ _cairo_xlib_surface_add_glyph (Display *dpy,
*
* This is a postscript-y model, where each glyph has its own
* coordinate space, so it's what we expose in terms of metrics. It's
- * apparantly what everyone's expecting. Everyone except the Render
+ * apparently what everyone's expecting. Everyone except the Render
* extension. Render wants to see a glyph tile starting at (0,0), with
* an origin offset inside, like this:
*
@@ -2694,9 +2694,9 @@ _cairo_xlib_surface_emit_glyphs (cairo_xlib_surface_t *dst,
* the first zero-size glyph. However, we don't skip all size-zero
* glyphs, since that will force a new element at every space. We
* skip initial size-zero glyphs and hope that it's enough. Since
- * Xft never exposed that bug, this assumptation should be correct.
+ * Xft never exposed that bug, this assumption should be correct.
*
- * We also skip any glyph that hav troublesome coordinates. We want
+ * We also skip any glyphs that have troublesome coordinates. We want
* to make sure that (glyph2.x - (glyph1.x + glyph1.width)) fits in
* a signed 16bit integer, otherwise it will overflow in the render
* protocol.