/*
* Copyright (c) 2005 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of the License "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description: Transformation cache implementation
*
*/


/*!
 * \internal
 * \file
 * \brief Transformation cache implementation
 *
 * Caches node-to-node path transformations and per-object composite
 * transformations, keyed by object pointers hashed into fixed-size
 * direct-mapped tables.
 */

#ifndef M3G_CORE_INCLUDE
# error included by m3g_core.c; do not compile separately.
#endif

#include "m3g_tcache.h"

/* NOTE size MUST be a power of two! The hash functions below map to a
 * slot by masking with (size - 1), which only works for powers of two. */
#define TCACHE_COMPOSITES 128
#define TCACHE_PATHS 128

/*----------------------------------------------------------------------
 * Data structures
 *--------------------------------------------------------------------*/

/*!
 * \internal
 * \brief Transformation path cache element
 *
 * Stores the transformation between the nodes \c from and \c to.
 * Only the upper 3x4 part of the 4x4 matrix is stored; the bottom
 * row is implied (see note below) and restored on retrieval.
 * The \c mask, \c classified, and \c complete fields mirror the
 * corresponding Matrix bookkeeping fields.
 */
typedef struct
{
    M3Gfloat elem[12]; /* NOTE the bottom row is always 0 0 0 1 */
    M3Guint mask;
    M3Guint classified : 1;
    M3Guint complete : 1;

    const Node *from, *to;
} TCachePath;

/* The path table is sized and zeroed in bulk, so the element size is
 * pinned to exactly 64 bytes (presumably also for cache alignment --
 * 32-bit target assumed for the two pointers; verify on other ABIs) */
M3G_CT_ASSERT2(sizeof(TCachePath) == 64);

/*!
sl@0: * \internal sl@0: * \brief Transformation cache data implementation sl@0: */ sl@0: struct TCacheImpl sl@0: { sl@0: Interface *m3g; sl@0: sl@0: TCachePath paths[TCACHE_PATHS]; sl@0: Matrix composites[TCACHE_COMPOSITES]; sl@0: const Transformable *compositeObjs[TCACHE_COMPOSITES]; sl@0: sl@0: M3Gbool pathsInvalid; sl@0: }; sl@0: sl@0: /*---------------------------------------------------------------------- sl@0: * Private functions sl@0: *--------------------------------------------------------------------*/ sl@0: sl@0: static M3G_INLINE M3Gint m3gTransformableHash(const Transformable *t) sl@0: { sl@0: M3Guint a = (M3Guint) t; sl@0: M3Guint b = (M3Guint) t; sl@0: sl@0: a += (a >> 3) + (a >> 9) + (a >> 17); sl@0: b = (b >> 16) | (b << 16); sl@0: b += (b >> 5) + (b >> 10) + (b >> 20); sl@0: return (M3Gint)(a ^ b); sl@0: } sl@0: sl@0: static M3Gint m3gPathHash(const Node *from, const Node *to) sl@0: { sl@0: M3Guint a = (M3Guint) from; sl@0: M3Guint b = (M3Guint) to; sl@0: sl@0: a += (a >> 3) + (a >> 9) + (a >> 17); sl@0: b = (b >> 16) | (b << 16); sl@0: b += (b >> 5) + (b >> 10) + (b >> 20); sl@0: return (M3Gint)(a ^ b); sl@0: } sl@0: sl@0: static M3G_INLINE M3Gint m3gGetTransformableSlot(const TCache *tc, const Transformable *t) sl@0: { sl@0: M3G_UNREF(tc); sl@0: return m3gTransformableHash(t) & (TCACHE_COMPOSITES - 1); sl@0: } sl@0: sl@0: static M3G_INLINE M3Gint m3gGetPathSlot(const TCache *tc, const Node *from, const Node *to) sl@0: { sl@0: M3G_UNREF(tc); sl@0: return m3gPathHash(from, to) & (TCACHE_PATHS - 1); sl@0: } sl@0: sl@0: /*---------------------------------------------------------------------- sl@0: * Internal functions sl@0: *--------------------------------------------------------------------*/ sl@0: sl@0: /*! 
sl@0: * \internal sl@0: * \brief sl@0: */ sl@0: static TCache* m3gCreateTransformCache(Interface *m3g) sl@0: { sl@0: TCache *cache = m3gAllocZ(m3g, sizeof(TCache)); sl@0: if (cache) { sl@0: cache->m3g = m3g; sl@0: cache->pathsInvalid = M3G_TRUE; sl@0: } sl@0: return cache; sl@0: } sl@0: sl@0: /*! sl@0: * \internal sl@0: * \brief sl@0: */ sl@0: static void m3gDeleteTransformCache(TCache *cache) sl@0: { sl@0: if (cache) { sl@0: m3gFree(cache->m3g, cache); sl@0: } sl@0: } sl@0: sl@0: /*! sl@0: * \internal sl@0: * \brief sl@0: */ sl@0: static void m3gCacheComposite(TCache *cache, const Transformable *t, const Matrix *m) sl@0: { sl@0: M3Gint idx = m3gGetTransformableSlot(cache, t); sl@0: sl@0: /* If the matrix being added already exists in the cache, the sl@0: * cache is being used sub-optimally */ sl@0: sl@0: M3G_ASSERT(cache->compositeObjs[idx] != t); sl@0: sl@0: /* Just overwrite any existing entries, but keep track of sl@0: * collisions for profiling */ sl@0: sl@0: if (cache->compositeObjs[idx]) { sl@0: m3gIncStat(cache->m3g, M3G_STAT_TCACHE_COMPOSITE_COLLISIONS, 1); sl@0: } sl@0: else { sl@0: m3gIncStat(cache->m3g, M3G_STAT_TCACHE_COMPOSITE_LOAD, 1); sl@0: } sl@0: cache->composites[idx] = *m; sl@0: cache->compositeObjs[idx] = t; sl@0: sl@0: m3gIncStat(cache->m3g, M3G_STAT_TCACHE_COMPOSITE_INSERTS, 1); sl@0: } sl@0: sl@0: /*! 
 * \internal
 * \brief Insert a path transformation into the cache
 *
 * Stores the matrix transforming from node \c from to node \c to.
 * Only the upper 3x4 portion of \c m is stored (the bottom row must
 * be 0 0 0 1, asserted via m3gIsWUnity).  Existing entries in the
 * hashed slot are simply overwritten.
 */
static void m3gCachePath(TCache *cache, const Node *from, const Node *to, const Matrix *m)
{
    M3G_ASSERT(m3gIsWUnity(m));
    /* These two asserts are not errors in a strict sense, but imply
     * sub-optimal cache use */
    M3G_ASSERT(from || to);
    M3G_ASSERT(from != to);

    M3G_BEGIN_PROFILE(cache->m3g, M3G_PROFILE_TCACHE);

    /* If the cache has been invalidated, wipe it clean before
     * inserting anything (invalidation is deferred to this point;
     * see m3gInvalidateCachedPaths) */

    if (cache->pathsInvalid) {
        m3gIncStat(cache->m3g, M3G_STAT_TCACHE_PATH_FLUSHES, 1);
        m3gZero(cache->paths, TCACHE_PATHS * sizeof(cache->paths[0]));
        cache->pathsInvalid = M3G_FALSE;
    }

    /* Hash to the cache, then just overwrite anything previously
     * there */
    {
        M3Gint idx = m3gGetPathSlot(cache, from, to);
        TCachePath *c = &cache->paths[idx];

        /* If this assert is hit, the path being added already exists
         * in the cache; this is not an error per se, but implies
         * sub-optimal cache usage */
        M3G_ASSERT(c->from != from || c->to != to);

        /* Overwrite the matrix data: copy the first three elements of
         * each 4-element column, skipping the constant bottom-row
         * element (restored as 0 0 0 1 on lookup) */
        {
            const M3Gfloat *src = &m->elem[0];
            M3Gfloat *dst = &c->elem[0];
            int row, col;
            for (col = 0; col < 4; ++col) {
                for (row = 0; row < 3; ++row) {
                    *dst++ = *src++;
                }
                ++src; /* skip the bottom-row element of this column */
            }
            c->mask = m->mask;
            c->classified = m->classified;
            c->complete = m->complete;
        }

        /* Register collisions for bookkeeping; NOTE this reads the
         * previous occupant's (from, to) keys, so it must execute
         * before they are overwritten below */

        if (c->from || c->to) {
            m3gIncStat(cache->m3g, M3G_STAT_TCACHE_PATH_COLLISIONS, 1);
        }
        else {
            m3gIncStat(cache->m3g, M3G_STAT_TCACHE_PATH_LOAD, 1);
        }

        c->from = from;
        c->to = to;
    }
    M3G_END_PROFILE(cache->m3g, M3G_PROFILE_TCACHE);

    m3gIncStat(cache->m3g, M3G_STAT_TCACHE_PATH_INSERTS, 1);
}

/*!
 * \internal
 * \brief Look up the cached composite transformation of an object
 *
 * \return M3G_TRUE and a copy of the matrix in \c m on a hit,
 *         M3G_FALSE otherwise (\c m untouched)
 */
static M3Gbool m3gGetCachedComposite(const TCache *cache, const Transformable *t, Matrix *m)
{
    M3Gint idx = m3gGetTransformableSlot(cache, t);
    if (cache->compositeObjs[idx] == t) {
        *m = cache->composites[idx];
        m3gIncStat(cache->m3g, M3G_STAT_TCACHE_COMPOSITE_HITS, 1);
        return M3G_TRUE;
    }
    m3gIncStat(cache->m3g, M3G_STAT_TCACHE_COMPOSITE_MISSES, 1);
    return M3G_FALSE;
}

/*!
 * \internal
 * \brief Look up a cached path transformation
 *
 * Expands the stored 3x4 matrix data back into a full 4x4 matrix in
 * \c m, restoring the implied bottom row 0 0 0 1.
 *
 * \return M3G_TRUE and the matrix in \c m on a hit, M3G_FALSE
 *         otherwise (a pending invalidation counts as a miss)
 */
static M3Gbool m3gGetCachedPath(const TCache *cache, const Node *from, const Node *to, Matrix *m)
{
    M3G_BEGIN_PROFILE(cache->m3g, M3G_PROFILE_TCACHE);

    /* An invalidated table is wiped lazily on insert, so its current
     * contents must not be trusted here */
    if (!cache->pathsInvalid) {

        /* Hash to the respective cache slot */

        M3Gint idx = m3gGetPathSlot(cache, from, to);
        const TCachePath *c = &cache->paths[idx];

        /* If it matches, copy to the output matrix */

        if (c->from == from && c->to == to) {
            const M3Gfloat *src = &c->elem[0];
            M3Gfloat *dst = &m->elem[0];
            int col;
            /* Three stored elements per column plus an implied
             * bottom-row zero; elem[15] is then fixed up to 1 */
            for (col = 0; col < 4; ++col) {
                *dst++ = *src++;
                *dst++ = *src++;
                *dst++ = *src++;
                *dst++ = 0.0f;
            }
            m->elem[15] = 1.0f;
            m->mask = c->mask;
            m->classified = c->classified;
            m->complete = c->complete;

            M3G_END_PROFILE(cache->m3g, M3G_PROFILE_TCACHE);

            m3gIncStat(cache->m3g, M3G_STAT_TCACHE_PATH_HITS, 1);
            return M3G_TRUE;
        }
    }
    M3G_END_PROFILE(cache->m3g, M3G_PROFILE_TCACHE);

    m3gIncStat(cache->m3g, M3G_STAT_TCACHE_PATH_MISSES, 1);
    return M3G_FALSE;
}

/*!
 * \internal
 * \brief Invalidate all cached path transformations
 *
 * Merely flags the table invalid; the actual wipe is deferred until
 * the next insertion (see m3gCachePath).  The node argument is
 * currently unused -- the whole table is invalidated regardless.
 */
static void m3gInvalidateCachedPaths(TCache *cache, const Node *n)
{
    M3G_UNREF(n);
    m3gResetStat(cache->m3g, M3G_STAT_TCACHE_PATH_LOAD);
    cache->pathsInvalid = M3G_TRUE;
}

/*!
sl@0: * \internal sl@0: * \brief sl@0: */ sl@0: static void m3gInvalidateCachedTransforms(TCache *cache, const Transformable *t) sl@0: { sl@0: /* Look for a composite entry and wipe if found */ sl@0: { sl@0: M3Gint idx = m3gGetTransformableSlot(cache, t); sl@0: if (cache->compositeObjs[idx] == t) { sl@0: cache->compositeObjs[idx] = NULL; sl@0: m3gIncStat(cache->m3g, M3G_STAT_TCACHE_COMPOSITE_LOAD, -1); sl@0: } sl@0: } sl@0: sl@0: /* NOTE We just cast the pointer to a Node below -- it's only used sl@0: * for hashing, and every object will still be unique regardless sl@0: * of the class */ sl@0: sl@0: m3gInvalidateCachedPaths(cache, (const Node*) t); sl@0: }