Index: ZoneMinder-1.26.5/src/zm_image.cpp
===================================================================
--- ZoneMinder-1.26.5.orig/src/zm_image.cpp	2014-02-04 11:18:56.427421662 +1100
+++ ZoneMinder-1.26.5/src/zm_image.cpp	2014-02-04 11:36:58.592724844 +1100
@@ -2988,8 +2988,8 @@
 /************************************************* BLEND FUNCTIONS *************************************************/
 
 
-__attribute__((noinline,__target__("sse2"))) void sse2_fastblend(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count, double blendpercent) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("sse2"))) void sse2_fastblend(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count, double blendpercent) {
 	static uint32_t divider = 0;
 	static uint32_t clearmask = 0;
 	static double current_blendpercent = 0.0;
@@ -3048,10 +3048,12 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count), "m" (clearmask), "m" (divider)
 	: "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "cc", "memory"
 	);
+}
 #else
+void sse2_fastblend(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count, double blendpercent) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 __attribute__((noinline)) void std_fastblend(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count, double blendpercent) {
 	static int divider = 0;
@@ -3330,8 +3332,8 @@
 }
 
 /* Grayscale SSE2 */
-__attribute__((noinline,__target__("sse2"))) void sse2_delta8_gray8(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("sse2"))) void sse2_delta8_gray8(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 
 	__asm__ __volatile__ (
 	"sub $0x10, %0\n\t"
@@ -3352,14 +3354,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count)
 	: "%xmm1", "%xmm2", "%xmm3", "%xmm4", "cc", "memory"
 	);
+}
 #else
+void sse2_delta8_gray8(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: RGBA SSE2 */
-__attribute__((noinline,__target__("sse2"))) void sse2_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("sse2"))) void sse2_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
   
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3407,14 +3411,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "cc", "memory"
 	);
+}
 #else
+void sse2_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: BGRA SSE2 */
-__attribute__((noinline,__target__("sse2"))) void sse2_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("sse2"))) void sse2_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
   
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3462,14 +3468,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "cc", "memory"
 	);
+}
 #else
+void sse2_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: ARGB SSE2 */
-__attribute__((noinline,__target__("sse2"))) void sse2_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("sse2"))) void sse2_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
   
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3518,14 +3526,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "cc", "memory"
 	);
+}
 #else
+void sse2_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: ABGR SSE2 */
-__attribute__((noinline,__target__("sse2"))) void sse2_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("sse2"))) void sse2_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
   
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3574,14 +3584,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "cc", "memory"
 	);
+}
 #else
+void sse2_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: RGBA SSSE3 */
-__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3626,14 +3638,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count), "m" (*movemask)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "cc", "memory"
 	);
+}
 #else
+void ssse3_delta8_rgba(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: BGRA SSSE3 */
-__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3678,14 +3692,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count), "m" (*movemask)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "cc", "memory"
 	);
+}
 #else
+void ssse3_delta8_bgra(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: ARGB SSSE3 */
-__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3731,14 +3747,16 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count), "m" (*movemask)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "cc", "memory"
 	);
+}
 #else
+void ssse3_delta8_argb(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* RGB32: ABGR SSSE3 */
-__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("ssse3"))) void ssse3_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -3784,10 +3802,12 @@
 	: "r" (col1), "r" (col2), "r" (result), "r" (count), "m" (*movemask)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "cc", "memory"
 	);
+}
 #else
+void ssse3_delta8_abgr(const uint8_t* col1, const uint8_t* col2, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 
 /************************************************* CONVERT FUNCTIONS *************************************************/
@@ -3989,8 +4009,8 @@
 }
 
 /* RGBA to grayscale SSSE3 */
-__attribute__((noinline,__target__("ssse3"))) void ssse3_convert_rgba_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("ssse3"))) void ssse3_convert_rgba_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) {
 
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
@@ -4029,14 +4049,16 @@
 	: "r" (col1), "r" (result), "r" (count), "m" (*movemask)
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "cc", "memory"
 	);
+}
 #else
+void ssse3_convert_rgba_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* Converts a YUYV image into grayscale by extracting the Y channel */
-__attribute__((noinline,__target__("ssse3"))) void ssse3_convert_yuyv_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))  
+__attribute__((noinline,__target__("ssse3"))) void ssse3_convert_yuyv_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) {
 	unsigned long i = 0;
   
 	__attribute__((aligned(16))) static const uint8_t movemask1[16] = {0,2,4,6,8,10,12,14,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
@@ -4074,10 +4096,12 @@
 #endif
 	: "%xmm3", "%xmm4", "cc", "memory"
 	);
+}
 #else
+void ssse3_convert_yuyv_gray8(const uint8_t* col1, uint8_t* result, unsigned long count) {
 	Panic("SSE function called on a non x86\\x86-64 platform");
-#endif
 }
+#endif
 
 /* YUYV to RGB24 - relocated from zm_local_camera.cpp */
 __attribute__((noinline)) void zm_convert_yuyv_rgb(const uint8_t* col1, uint8_t* result, unsigned long count) {
@@ -4652,8 +4676,9 @@
 }
 
 /* Grayscale SSSE3 */
+#if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))
 __attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_gray8(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
-	
+
 	union {
 		uint32_t int32;
 		uint8_t int8a[4];
@@ -4662,11 +4687,11 @@
 	threshold_mask.int8a[1] = 0;
 	threshold_mask.int8a[2] = threshold;
 	threshold_mask.int8a[3] = 0;
-	
+
 	unsigned long row_width = width;
 	uint8_t* max_ptr = col1 + (row_width * (height-2));
 	uint8_t* max_ptr2 = col1 + row_width;
-	
+
 	__asm__ __volatile__ (
 	/* Load the threshold */
 	"mov %5, %%eax\n\t"
@@ -4674,9 +4699,9 @@
 	"pshufd $0x0, %%xmm4, %%xmm4\n\t"
 	/* Zero the temporary register */
 	"pxor %%xmm0, %%xmm0\n\t"
-	
+
 	"algo_ssse3_deinterlace_4field_gray8:\n\t"
-	
+
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -4685,11 +4710,11 @@
 	"pminub %%xmm5, %%xmm2\n\t"
 	"psubb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -4697,17 +4722,17 @@
 	"pmaxub %%xmm2, %%xmm1\n\t"
 	"pminub %%xmm6, %%xmm2\n\t"
 	"psubb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 	"movdqa %%xmm1, %%xmm2\n\t"
-	
+
 	/* Do the comparison on words instead of bytes because we don't have unsigned comparison */
 	"punpcklbw %%xmm0, %%xmm1\n\t"                     // Expand pixels 0-7 into words into xmm1
 	"punpckhbw %%xmm0, %%xmm2\n\t"                     // Expand pixels 8-15 into words into xmm2
 	"pcmpgtw %%xmm4, %%xmm1\n\t"                       // Compare average delta with threshold for pixels 0-7
 	"pcmpgtw %%xmm4, %%xmm2\n\t"                       // Compare average delta with threshold for pixels 8-15
 	"packsswb %%xmm2, %%xmm1\n\t"                      // Pack the comparison results into xmm1
-	
+
 	"movdqa (%0,%4), %%xmm2\n\t"                       // Load pbelow
 	"pavgb %%xmm5, %%xmm2\n\t"                         // Average pabove and pbelow
 	"pand %%xmm1, %%xmm2\n\t"                          // Filter out pixels in avg that shouldn't be copied
@@ -4715,24 +4740,24 @@
 
 	"por %%xmm2, %%xmm1\n\t"                           // Put the new values in pcurrent
 	"movntdq %%xmm1, (%0)\n\t"                         // Write pcurrent
-	
+
 	"sub %4, %0\n\t"                                   // Restore pcurrent to pabove
 	"sub %4, %1\n\t"                                   // Restore pncurrent to pnabove
-	
+
 	/* Next pixels */
 	"add $0x10, %0\n\t"                                // Add 16 to pcurrent
 	"add $0x10, %1\n\t"                                // Add 16 to pncurrent
-	
+
 	/* Check if we reached the row end */
 	"cmp %2, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_gray8\n\t"       // Go for another iteration
-	
+
 	/* Next row */
 	"add %4, %0\n\t"                                   // Add width to pcurrent
 	"add %4, %1\n\t"                                   // Add width to pncurrent
 	"mov %0, %2\n\t"
 	"add %4, %2\n\t"                                   // Add width to max_ptr2
-	
+
 	/* Check if we reached the end */
 	"cmp %3, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_gray8\n\t"       // Go for another iteration
@@ -4746,11 +4771,11 @@
 	"pminub %%xmm5, %%xmm2\n\t"
 	"psubb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -4758,17 +4783,17 @@
 	"pmaxub %%xmm2, %%xmm1\n\t"
 	"pminub %%xmm6, %%xmm2\n\t"
 	"psubb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 	"movdqa %%xmm1, %%xmm2\n\t"
-	
+
 	/* Do the comparison on words instead of bytes because we don't have unsigned comparison */
 	"punpcklbw %%xmm0, %%xmm1\n\t"                     // Expand pixels 0-7 into words into xmm1
 	"punpckhbw %%xmm0, %%xmm2\n\t"                     // Expand pixels 8-15 into words into xmm2
 	"pcmpgtw %%xmm4, %%xmm1\n\t"                       // Compare average delta with threshold for pixels 0-7
 	"pcmpgtw %%xmm4, %%xmm2\n\t"                       // Compare average delta with threshold for pixels 8-15
 	"packsswb %%xmm2, %%xmm1\n\t"                      // Pack the comparison results into xmm1
-	
+
 	"pand %%xmm1, %%xmm5\n\t"                          // Filter out pixels in pabove that shouldn't be copied
 	"pandn %%xmm6, %%xmm1\n\t"                         // Filter out pixels in pcurrent that should be replaced
 
@@ -4779,17 +4804,23 @@
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "cc", "memory"
 	);
 }
+#else
+void ssse3_deinterlace_4field_gray8(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
+	Panic("SSE function called on a non x86\\x86-64 platform");
+}
+#endif
 
 /* RGBA SSSE3 */
+#if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))
 __attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_rgba(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
 	__attribute__((aligned(16))) static const uint8_t movemask2[16] = {1,1,1,1,1,0,0,2,9,9,9,9,9,8,8,10};
-	
+
 	const uint32_t threshold_val = threshold;
-	
+
 	unsigned long row_width = width*4;
 	uint8_t* max_ptr = col1 + (row_width * (height-2));
 	uint8_t* max_ptr2 = col1 + row_width;
-	
+
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
 	"movd %%eax, %%xmm4\n\t"
@@ -4802,9 +4833,9 @@
 #endif
 	/* Zero the temporary register */
 	"pxor %%xmm0, %%xmm0\n\t"
-	
+
 	"algo_ssse3_deinterlace_4field_rgba:\n\t"
-	
+
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -4824,11 +4855,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -4847,7 +4878,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -4855,7 +4886,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"movdqa (%0,%4), %%xmm2\n\t"                       // Load pbelow
@@ -4865,28 +4896,28 @@
 
 	"por %%xmm2, %%xmm1\n\t"                           // Put the new values in pcurrent
 	"movntdq %%xmm1, (%0)\n\t"                         // Write pcurrent
-	
+
 	"sub %4, %0\n\t"                                   // Restore pcurrent to pabove
 	"sub %4, %1\n\t"                                   // Restore pncurrent to pnabove
-	
+
 	/* Next pixels */
 	"add $0x10, %0\n\t"                                // Add 16 to pcurrent
 	"add $0x10, %1\n\t"                                // Add 16 to pncurrent
-	
+
 	/* Check if we reached the row end */
 	"cmp %2, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_rgba\n\t"        // Go for another iteration
-	
+
 	/* Next row */
 	"add %4, %0\n\t"                                   // Add width to pcurrent
 	"add %4, %1\n\t"                                   // Add width to pncurrent
 	"mov %0, %2\n\t"
 	"add %4, %2\n\t"                                   // Add width to max_ptr2
-	
+
 	/* Check if we reached the end */
 	"cmp %3, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_rgba\n\t"        // Go for another iteration
-	
+
 	/* Special case for the last line */
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
@@ -4907,11 +4938,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -4930,7 +4961,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -4938,7 +4969,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"pand %%xmm1, %%xmm5\n\t"                          // Filter out pixels in pabove that shouldn't be copied
@@ -4955,17 +4986,23 @@
 #endif
 	);
 }
+#else
+void ssse3_deinterlace_4field_rgba(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
+	Panic("SSE function called on a non x86\\x86-64 platform");
+}
+#endif
 
 /* BGRA SSSE3 */
+#if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))
 __attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_bgra(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
 	__attribute__((aligned(16))) static const uint8_t movemask2[16] = {1,1,1,1,1,2,2,0,9,9,9,9,9,10,10,8};
-	
+
 	const uint32_t threshold_val = threshold;
-	
+
 	unsigned long row_width = width*4;
 	uint8_t* max_ptr = col1 + (row_width * (height-2));
 	uint8_t* max_ptr2 = col1 + row_width;
-	
+
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
 	"movd %%eax, %%xmm4\n\t"
@@ -4978,9 +5015,9 @@
 #endif
 	/* Zero the temporary register */
 	"pxor %%xmm0, %%xmm0\n\t"
-	
+
 	"algo_ssse3_deinterlace_4field_bgra:\n\t"
-	
+
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5000,11 +5037,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5023,7 +5060,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -5031,7 +5068,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"movdqa (%0,%4), %%xmm2\n\t"                       // Load pbelow
@@ -5041,28 +5078,28 @@
 
 	"por %%xmm2, %%xmm1\n\t"                           // Put the new values in pcurrent
 	"movntdq %%xmm1, (%0)\n\t"                         // Write pcurrent
-	
+
 	"sub %4, %0\n\t"                                   // Restore pcurrent to pabove
 	"sub %4, %1\n\t"                                   // Restore pncurrent to pnabove
-	
+
 	/* Next pixels */
 	"add $0x10, %0\n\t"                                // Add 16 to pcurrent
 	"add $0x10, %1\n\t"                                // Add 16 to pncurrent
-	
+
 	/* Check if we reached the row end */
 	"cmp %2, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_bgra\n\t"        // Go for another iteration
-	
+
 	/* Next row */
 	"add %4, %0\n\t"                                   // Add width to pcurrent
 	"add %4, %1\n\t"                                   // Add width to pncurrent
 	"mov %0, %2\n\t"
 	"add %4, %2\n\t"                                   // Add width to max_ptr2
-	
+
 	/* Check if we reached the end */
 	"cmp %3, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_bgra\n\t"        // Go for another iteration
-	
+
 	/* Special case for the last line */
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
@@ -5083,11 +5120,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5106,7 +5143,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -5114,7 +5151,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"pand %%xmm1, %%xmm5\n\t"                          // Filter out pixels in pabove that shouldn't be copied
@@ -5129,19 +5166,25 @@
 #else
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "cc", "memory"
 #endif
-	);
+	);
+}
+#else
+void ssse3_deinterlace_4field_bgra(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
+	Panic("SSE function called on a non x86\\x86-64 platform");
 }
+#endif
 
 /* ARGB SSSE3 */
+#if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))
 __attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_argb(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
 	__attribute__((aligned(16))) static const uint8_t movemask2[16] = {2,2,2,2,2,1,1,3,10,10,10,10,10,9,9,11};
-	
+
 	const uint32_t threshold_val = threshold;
-	
+
 	unsigned long row_width = width*4;
 	uint8_t* max_ptr = col1 + (row_width * (height-2));
 	uint8_t* max_ptr2 = col1 + row_width;
-	
+
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
 	"movd %%eax, %%xmm4\n\t"
@@ -5154,9 +5197,9 @@
 #endif
 	/* Zero the temporary register */
 	"pxor %%xmm0, %%xmm0\n\t"
-	
+
 	"algo_ssse3_deinterlace_4field_argb:\n\t"
-	
+
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5176,11 +5219,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5199,7 +5242,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -5207,7 +5250,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"movdqa (%0,%4), %%xmm2\n\t"                       // Load pbelow
@@ -5217,28 +5260,28 @@
 
 	"por %%xmm2, %%xmm1\n\t"                           // Put the new values in pcurrent
 	"movntdq %%xmm1, (%0)\n\t"                         // Write pcurrent
-	
+
 	"sub %4, %0\n\t"                                   // Restore pcurrent to pabove
 	"sub %4, %1\n\t"                                   // Restore pncurrent to pnabove
-	
+
 	/* Next pixels */
 	"add $0x10, %0\n\t"                                // Add 16 to pcurrent
 	"add $0x10, %1\n\t"                                // Add 16 to pncurrent
-	
+
 	/* Check if we reached the row end */
 	"cmp %2, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_argb\n\t"        // Go for another iteration
-	
+
 	/* Next row */
 	"add %4, %0\n\t"                                   // Add width to pcurrent
 	"add %4, %1\n\t"                                   // Add width to pncurrent
 	"mov %0, %2\n\t"
 	"add %4, %2\n\t"                                   // Add width to max_ptr2
-	
+
 	/* Check if we reached the end */
 	"cmp %3, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_argb\n\t"        // Go for another iteration
-	
+
 	/* Special case for the last line */
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
@@ -5259,11 +5302,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5282,7 +5325,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -5290,7 +5333,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"pand %%xmm1, %%xmm5\n\t"                          // Filter out pixels in pabove that shouldn't be copied
@@ -5305,19 +5348,25 @@
 #else
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "cc", "memory"
 #endif
-	);
+	);
 }
+#else
+void ssse3_deinterlace_4field_argb(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
+	Panic("SSE function called on a non x86\\x86-64 platform");
+}
+#endif
 
 /* ABGR SSSE3 */
+#if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))
 __attribute__((noinline,__target__("ssse3"))) void ssse3_deinterlace_4field_abgr(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
 	__attribute__((aligned(16))) static const uint8_t movemask2[16] = {2,2,2,2,2,3,3,1,10,10,10,10,10,11,11,9};
-	
+
 	const uint32_t threshold_val = threshold;
-	
+
 	unsigned long row_width = width*4;
 	uint8_t* max_ptr = col1 + (row_width * (height-2));
 	uint8_t* max_ptr2 = col1 + row_width;
-	
+
 	__asm__ __volatile__ (
 	"mov $0x1F1F1F1F, %%eax\n\t"
 	"movd %%eax, %%xmm4\n\t"
@@ -5330,9 +5379,9 @@
 #endif
 	/* Zero the temporary register */
 	"pxor %%xmm0, %%xmm0\n\t"
-	
+
 	"algo_ssse3_deinterlace_4field_abgr:\n\t"
-	
+
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5352,11 +5401,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5375,7 +5424,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -5383,7 +5432,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"movdqa (%0,%4), %%xmm2\n\t"                       // Load pbelow
@@ -5393,28 +5442,28 @@
 
 	"por %%xmm2, %%xmm1\n\t"                           // Put the new values in pcurrent
 	"movntdq %%xmm1, (%0)\n\t"                         // Write pcurrent
-	
+
 	"sub %4, %0\n\t"                                   // Restore pcurrent to pabove
 	"sub %4, %1\n\t"                                   // Restore pncurrent to pnabove
-	
+
 	/* Next pixels */
 	"add $0x10, %0\n\t"                                // Add 16 to pcurrent
 	"add $0x10, %1\n\t"                                // Add 16 to pncurrent
-	
+
 	/* Check if we reached the row end */
 	"cmp %2, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_abgr\n\t"        // Go for another iteration
-	
+
 	/* Next row */
 	"add %4, %0\n\t"                                   // Add width to pcurrent
 	"add %4, %1\n\t"                                   // Add width to pncurrent
 	"mov %0, %2\n\t"
 	"add %4, %2\n\t"                                   // Add width to max_ptr2
-	
+
 	/* Check if we reached the end */
 	"cmp %3, %0\n\t"
 	"jb algo_ssse3_deinterlace_4field_abgr\n\t"        // Go for another iteration
-	
+
 	/* Special case for the last line */
 	/* Load pabove into xmm1 and pnabove into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
@@ -5435,11 +5484,11 @@
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
 	"movdqa %%xmm1, %%xmm7\n\t" /* Backup of delta2 in xmm7 for now */
-	
+
 	/* Next row */
 	"add %4, %0\n\t"
 	"add %4, %1\n\t"
-	
+
 	/* Load pcurrent into xmm1 and pncurrent into xmm2 */
 	"movdqa (%0), %%xmm1\n\t"
 	"movdqa (%1), %%xmm2\n\t"
@@ -5458,7 +5507,7 @@
 	"pshufb %%xmm3, %%xmm2\n\t"
 	"psadbw %%xmm0, %%xmm2\n\t"
 	"packuswb %%xmm2, %%xmm1\n\t"
-	
+
 	"pavgb %%xmm7, %%xmm1\n\t"                         // Average the two deltas together
 
 #if defined(__x86_64__)
@@ -5466,7 +5515,7 @@
 #else
 	"movd %%eax, %%xmm7\n\t"                           // Setup the threshold
 	"pshufd $0x0, %%xmm7, %%xmm7\n\t"
-	
+
 	"pcmpgtd %%xmm7, %%xmm1\n\t"                       // Compare average delta with the threshold
 #endif
 	"pand %%xmm1, %%xmm5\n\t"                          // Filter out pixels in pabove that shouldn't be copied
@@ -5481,6 +5530,10 @@
 #else
 	: "%eax", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "cc", "memory"
 #endif
-	);
+	);
 }
-
+#else
+void ssse3_deinterlace_4field_abgr(uint8_t* col1, uint8_t* col2, unsigned int threshold, unsigned int width, unsigned int height) {
+	Panic("SSE function called on a non x86\\x86-64 platform");
+}
+#endif
Index: ZoneMinder-1.26.5/src/zm_signal.cpp
===================================================================
--- ZoneMinder-1.26.5.orig/src/zm_signal.cpp	2014-02-04 11:18:56.427421662 +1100
+++ ZoneMinder-1.26.5/src/zm_signal.cpp	2014-02-04 11:26:04.602354626 +1100
@@ -47,11 +47,13 @@
 RETSIGTYPE zm_die_handler(int signal)
 #endif
 {
+#if (defined(__i386__) || defined(__x86_64__))
 	void *cr2 = 0;
 	void *ip = 0;
-
+#endif
 	Error("Got signal %d (%s), crashing", signal, strsignal(signal));
 
+#if (defined(__i386__) || defined(__x86_64__))
 	// Get more information if available
 #if ( HAVE_SIGINFO_T && HAVE_UCONTEXT_T )
 	if (info && context) {
@@ -103,7 +105,7 @@
 	Info("Backtrace complete, please execute the following command for more information");
 	Info(cmd);
 #endif				// ( !defined(ZM_NO_CRASHTRACE) && HAVE_DECL_BACKTRACE && HAVE_DECL_BACKTRACE_SYMBOLS )
-
+#endif                          // (defined(__i386__) || defined(__x86_64__))
 	exit(signal);
 }
 
Index: ZoneMinder-1.26.5/src/zm_utils.cpp
===================================================================
--- ZoneMinder-1.26.5.orig/src/zm_utils.cpp	2013-12-17 15:25:14.000000000 +1100
+++ ZoneMinder-1.26.5/src/zm_utils.cpp	2014-02-04 11:50:21.290724472 +1100
@@ -192,8 +192,8 @@
 
 /* SSE2 aligned memory copy. Useful for big copying of aligned memory like image buffers in ZM */
 /* For platforms without SSE2 we will use standard x86 asm memcpy or glibc's memcpy() */
-__attribute__((noinline,__target__("sse2"))) void* sse2_aligned_memcpy(void* dest, const void* src, size_t bytes) {
 #if ((defined(__i386__) || defined(__x86_64__) || defined(ZM_KEEP_SSE)) && !defined(ZM_STRIP_SSE))
+__attribute__((noinline,__target__("sse2"))) void* sse2_aligned_memcpy(void* dest, const void* src, size_t bytes) {
 	if(bytes > 128) {
 		unsigned int remainder = bytes % 128;
 		const uint8_t* lastsrc = (uint8_t*)src + (bytes - remainder);
@@ -234,12 +234,15 @@
 		/* Standard memcpy */
 		__asm__ __volatile__("cld; rep movsb" :: "S"(src), "D"(dest), "c"(bytes) : "cc", "memory");
 	}
+	return dest;
+}
 #else
+void* sse2_aligned_memcpy(void* dest, const void* src, size_t bytes) {
 	/* Non x86\x86-64 platform, use memcpy */
 	memcpy(dest,src,bytes);
-#endif
 	return dest;
 }
+#endif
 
 void timespec_diff(struct timespec *start, struct timespec *end, struct timespec *diff) {
 	if (((end->tv_nsec)-(start->tv_nsec))<0) {
