/**
** BITCOPY.H
**
** Copyright (C) 1992, Csaba Biegl
** 820 Stirrup Dr, Nashville, TN, 37221
** csaba@vuse.vanderbilt.edu
**
** This file is distributed under the terms listed in the document
** "copying.cb", available from the author at the address above.
** A copy of "copying.cb" should accompany this file; if not, a copy
** should be available from where this file was obtained. This file
** may not be distributed without a verbatim copy of "copying.cb".
** You should also have received a copy of the GNU General Public
** License along with this program (it is in the file "copying");
** if not, write to the Free Software Foundation, Inc., 675 Mass Ave,
** Cambridge, MA 02139, USA.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**/
#ifndef _BITCOPY_H_
#define _BITCOPY_H_
#ifdef __TURBOC__
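/*
 * "#pragma inline" tells Turbo C up front that this module contains
 * inline assembly, so the compiler can go through the assembler pass
 * directly instead of restarting itself when the first "asm" statement
 * is encountered.
 */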
#pragma inline
#endif
/*
* utilities -- other files may define them too
*/
#ifndef _SaveDS
#ifdef __TURBOC__
#define _ClrDir() asm cld
#define _SetDir() asm std
#define _SaveDS() asm push ds
#define _RestoreDS() asm pop ds
#endif
#ifdef __GNUC__
#define _ASV asm volatile
#define _ClrDir() _ASV("cld")
#define _SetDir() _ASV("std")
#define _SaveDS()
#define _RestoreDS()
#endif
#endif /* _SaveDS */
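/*
 * A minimal usage sketch (hypothetical caller code, not part of this
 * header): the copy macros below load the source/destination pointers
 * into the segment/index registers themselves, so under Turbo C a
 * caller that must keep DS intact would typically bracket them like
 *
 *    _SaveDS();
 *    _ClrDir();
 *    _CopyMskLine(dstptr,srcptr,masks,wdt);
 *    _RestoreDS();
 *
 * where dstptr, srcptr, masks and wdt are the caller's own variables.
 * Under GNU C _SaveDS()/_RestoreDS() expand to nothing.
 */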
#ifdef __TURBOC__
/*
* Put home a single byte with various logical operations
* es:di - dest (to be incremented)
* al - byte to be written, already shifted if necessary
*/
#define __CPYBYTE__ asm stosb
#define __XORBYTE__ asm xor al,BYTE PTR es:[di]; asm stosb
#define __ORBYTE__ asm or al,BYTE PTR es:[di]; asm stosb
#define __ANDBYTE__ asm and al,BYTE PTR es:[di]; asm stosb
/*
* unshifted byte-sized line copying with various logical operations
* es:di - dest
* ds:si - source
* cx - width
*/
#define __CPYLINE__(label) asm rep movsb
#define __XORLINE__(label) label: asm lodsb; __XORBYTE__; asm loop label
#define __ORLINE__(label) label: asm lodsb; __ORBYTE__; asm loop label
#define __ANDLINE__(label) label: asm lodsb; __ANDBYTE__; asm loop label
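/*
 * (The loops above assume the direction flag is clear -- see _ClrDir()
 * -- so that lodsb/stosb and "rep movsb" advance si/di forward through
 * the line.)
 */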
/*
* Copy a single byte with edge masking and various logical ops
* es:di - dest (to be incremented)
* al - source byte, shifted if necessary
* maskreg - has mask
*/
#define __CPYMASK__(maskreg) \
asm and al,maskreg; \
asm not maskreg; \
asm and maskreg,BYTE PTR es:[di]; \
asm or al,maskreg; \
asm stosb
#define __XORMASK__(maskreg) \
asm and al,maskreg; \
asm xor al,BYTE PTR es:[di]; \
asm stosb
#define __ORMASK__(maskreg) \
asm and al,maskreg; \
asm or al,BYTE PTR es:[di]; \
asm stosb
#define __ANDMASK__(maskreg) \
asm and al,maskreg; \
asm not maskreg; \
asm or al,maskreg; \
asm and al,BYTE PTR es:[di]; \
asm stosb
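/*
 * (Note on the masked stores above: __CPYMASK__ merges source and
 * destination in the classic way -- source bits are kept where the mask
 * is 1, destination bits where it is 0.  __ANDMASK__ first forces the
 * source byte to 1 in every position outside the mask ("not maskreg" /
 * "or al,maskreg") so that the final AND leaves those destination bits
 * unchanged.)
 */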
/*
* aligned line copying with masking if necessary
*/
#define __CPYMSKLINE__(dst,src,masks,wdt,type) do { \
_BX = masks; \
_CX = wdt; \
asm les di,DWORD PTR dst; \
asm lds si,DWORD PTR src; \
if(_BL) { asm lodsb; type##MASK__(bl); } \
type##LINE__(LineCopy##type##dst##src##Loop); \
if(_BH) { asm lodsb; type##MASK__(bh); } \
} while(0)
#define _CopyMskLine(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__CPY)
#define _CopyMskLineXor(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__XOR)
#define _CopyMskLineOr(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__OR)
#define _CopyMskLineAnd(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__AND)
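/*
 * How these entry points appear to be meant to be called (an
 * interpretation of the code above, not original documentation): "m"
 * packs the two edge masks, left edge mask in the low byte and right
 * edge mask in the high byte, either of which may be 0 to skip that
 * edge; "w" is the number of full bytes between the two edges.  A
 * hypothetical call copying one pixel row could look like
 *
 *    _CopyMskLine(dstrow,srcrow,(rightmask << 8) | leftmask,middlebytes);
 *
 * with dstrow/srcrow pointing at the first (left edge) byte of each row.
 */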
/*
* edge masking only for aligned line copy
* NOTE: called when the width of the middle part is 0 or when the
* middle part has already been copied through the VGA latches (all
* four planes at once)!
*/
#define __CPYMSKEDGE__(dst,src,masks,wdt,type) do { \
_BX = masks; \
_CX = wdt; \
asm les di,DWORD PTR dst; \
asm lds si,DWORD PTR src; \
if(_BL) { asm lodsb; type##MASK__(bl); } \
asm add di,cx; \
asm add si,cx; \
if(_BH) { asm lodsb; type##MASK__(bh); } \
} while(0)
#define _CopyMskEdge(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__CPY)
#define _CopyMskEdgeXor(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__XOR)
#define _CopyMskEdgeOr(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__OR)
#define _CopyMskEdgeAnd(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__AND)
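/*
 * (The edge-only variant above relies on the same argument packing as
 * _CopyMskLine; it just skips over the "w" middle bytes -- di/si are
 * advanced by cx -- and touches the two partial edge bytes only, for
 * the case described in the NOTE where the middle part has gone through
 * the VGA latches.)
 */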
/*
* non-aligned shifted copying (needs separate fwd and reverse versions)
*/
#define __SHIFTLINE__(dst,src,sft,msk,wdt,type,dir) do { \
_BX = msk; \
_CX = sft; \
_DX = wdt; \
asm les di,DWORD PTR dst; \
asm lds si,DWORD PTR src; \
asm and ch,1; \
asm jz Shift##dir##type##dst##src##NoInit; \
asm lodsb; \
asm xor ah,ah; \
asm ro##dir ax,cl; \
asm mov ch,ah; \
Shift##dir##type##dst##src##NoInit: \
if(_BL) { \
asm lodsb; \
asm xor ah,ah; \
asm ro##dir ax,cl; \
asm or al,ch; \
asm mov ch,ah; \
type##MASK__(bl); \
} \
if(_DX) { \
Shift##dir##type##dst##src##Loop: \
asm lodsb; \
asm xor ah,ah; \
asm ro##dir ax,cl; \
asm or al,ch; \
asm mov ch,ah; \
type##BYTE__; \
asm dec dx; \
asm jnz Shift##dir##type##dst##src##Loop; \
} \
if(_BH) { \
_DX = sft; \
asm and dh,2; \
asm jnz Shift##dir##type##dst##src##NoLastByte; \
asm lodsb; \
Shift##dir##type##dst##src##NoLastByte: \
asm sh##dir al,cl; \
asm or al,ch; \
type##MASK__(bh); \
} \
} while(0)
#define _FwdShiftLine(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__CPY,r)
#define _FwdShiftLineXor(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__XOR,r)
#define _FwdShiftLineOr(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__OR,r)
#define _FwdShiftLineAnd(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__AND,r)
#define _RevShiftLine(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__CPY,l)
#define _RevShiftLineXor(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__XOR,l)
#define _RevShiftLineOr(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__OR,l)
#define _RevShiftLineAnd(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__AND,l)
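/*
 * Reading of the "sft" argument from the code above (stated as an
 * assumption, since the original documentation is not included here):
 * the low byte (cl) holds the bit shift count, bit 8 requests an extra
 * leading source byte to prime the inter-byte carry, and bit 9
 * suppresses the fetch of a final source byte when the last masked
 * destination byte can be completed from the carried bits alone.  A
 * hypothetical shifted copy would then be issued as
 *
 *    _FwdShiftLine(dstrow,srcrow,shiftctl,masks,middlebytes);
 *
 * with shiftctl built from the shift count plus those two flag bits,
 * and the Rev... variants used when the bits must travel the other way.
 */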
#endif /* __TURBOC__ */
#ifdef __GNUC__
/*
* Put home a single byte with various logical operations
* edi - dest (to be incremented)
* al - byte to be written, already shifted if necessary
*/
#define __CPYBYTE__ "stosb"
#define __XORBYTE__ "xorb (%%edi),%%al; stosb"
#define __ORBYTE__ "orb (%%edi),%%al; stosb"
#define __ANDBYTE__ "andb (%%edi),%%al; stosb"
/*
* unshifted byte-sized line copying with various logical operations
* edi - dest
* esi - source
* ecx - width
*/
#define __CPYLINE__(label) #label": rep; movsb"
#define __XORLINE__(label) #label": lodsb; "__XORBYTE__" ; loop "#label
#define __ORLINE__(label) #label": lodsb; "__ORBYTE__ " ; loop "#label
#define __ANDLINE__(label) #label": lodsb; "__ANDBYTE__" ; loop "#label
/*
* Copy a single byte with edge masking and various logical ops
* edi - dest (to be incremented)
* al - source byte, shifted if necessary
* maskreg - has mask
*/
#define __CPYMASK__(MS) \
"andb "#MS",%%al; notb "#MS"; andb (%%edi),"#MS"; orb "#MS",%%al; stosb"
#define __XORMASK__(MS) \
"andb "#MS",%%al; xorb (%%edi),%%al; stosb"
#define __ORMASK__(MS) \
"andb "#MS",%%al; orb (%%edi),%%al; stosb"
#define __ANDMASK__(MS) \
"andb "#MS",%%al; notb "#MS"; orb "#MS",%%al; andb (%%edi),%%al; stosb"
/*
* aligned line copying with masking if necessary
* NOTE: does not check for zero middle part width!!!
*/
#define __CPYMSKLINE__(dst,src,masks,wdt,type) _ASV(" \n\
movl %0,%%edi \n\
movl %1,%%esi \n\
movl %2,%%ebx \n\
movl %3,%%ecx \n\
orb %%bl,%%bl \n\
jz L_CpyLine"#type"BodyLoop \n\
lodsb \n\
"type##MASK__(%%bl)" \n\
"type##LINE__(L_CpyLine##type##BodyLoop)" \n\
orb %%bh,%%bh \n\
jz L_CpyLine"#type"NoLastMask \n\
lodsb \n\
"type##MASK__(%%bh)" \n\
L_CpyLine"#type"NoLastMask: "\
: /* NOTHING */ \
: "g" (dst), "g" (src), "g" (masks), "g" (wdt) \
: "di", "si", "cx", "bx", "ax" \
)
#define _CopyMskLine(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__CPY)
#define _CopyMskLineXor(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__XOR)
#define _CopyMskLineOr(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__OR)
#define _CopyMskLineAnd(d,s,m,w) __CPYMSKLINE__(d,s,m,w,__AND)
/*
* edge masking only for aligned line copy
* NOTE: called when the width of the middle part is 0 or when the
* middle part has already been copied through the VGA latches (all
* four planes at once)!
*/
#define __CPYMSKEDGE__(dst,src,masks,wdt,type) _ASV(" \n\
movl %0,%%edi \n\
movl %1,%%esi \n\
movl %2,%%ebx \n\
orb %%bl,%%bl \n\
jz L_CpyEdge"#type"SkipBody \n\
lodsb \n\
"type##MASK__(%%bl)" \n\
L_CpyEdge"#type"SkipBody: \n\
addl %3,%%edi \n\
addl %3,%%esi \n\
orb %%bh,%%bh \n\
jz L_CpyEdge"#type"NoLastMask \n\
lodsb \n\
"type##MASK__(%%bh)" \n\
L_CpyEdge"#type"NoLastMask: "\
: /* NOTHING */ \
: "g" (dst), "g" (src), "g" (masks), "g" (wdt) \
: "di", "si", "bx", "ax" \
)
#define _CopyMskEdge(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__CPY)
#define _CopyMskEdgeXor(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__XOR)
#define _CopyMskEdgeOr(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__OR)
#define _CopyMskEdgeAnd(d,s,m,w) __CPYMSKEDGE__(d,s,m,w,__AND)
/*
* non-aligned shifted copying (needs separate fwd and reverse versions)
*/
#define __SHIFTLINE__(dst,src,sft,msk,wdt,type,dir) _ASV(" \n\
movl %0,%%edi \n\
movl %1,%%esi \n\
movl %2,%%ecx \n\
movl %3,%%ebx \n\
movl %4,%%edx \n\
andb $1,%%ch \n\
jz L_SftLine"#dir#type"ChkMask1 \n\
lodsb \n\
xorb %%ah,%%ah \n\
ro"#dir"w %%cl,%%ax \n\
movb %%ah,%%ch \n\
L_SftLine"#dir#type"ChkMask1: \n\
orb %%bl,%%bl \n\
jz L_SftLine"#dir#type"ChkWidth \n\
lodsb \n\
xorb %%ah,%%ah \n\
ro"#dir"w %%cl,%%ax \n\
orb %%ch,%%al \n\
movb %%ah,%%ch \n\
"type##MASK__(%%bl)" \n\
L_SftLine"#dir#type"ChkWidth: \n\
orl %%edx,%%edx \n\
jz L_SftLine"#dir#type"ChkMask2 \n\
L_SftLine"#dir#type"BodyLoop: \n\
lodsb \n\
xorb %%ah,%%ah \n\
ro"#dir"w %%cl,%%ax \n\
orb %%ch,%%al \n\
movb %%ah,%%ch \n\
"type##BYTE__" \n\
decl %%edx \n\
jnz L_SftLine"#dir#type"BodyLoop \n\
L_SftLine"#dir#type"ChkMask2: \n\
orb %%bh,%%bh \n\
jz L_SftLine"#dir#type"EndLine \n\
movl %2,%%edx \n\
andb $2,%%dh \n\
jnz L_SftLine"#dir#type"NoLastByte \n\
lodsb \n\
L_SftLine"#dir#type"NoLastByte: \n\
sh"#dir"b %%cl,%%al \n\
orb %%ch,%%al \n\
"type##MASK__(%%bh)" \n\
L_SftLine"#dir#type"EndLine: "\
: /* NOTHING */ \
: "g" (dst), "g" (src), "g" (sft), "g" (msk), "g" (wdt) \
: "di", "si", "dx", "cx", "bx", "ax" \
)
#define _FwdShiftLine(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__CPY,r)
#define _FwdShiftLineXor(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__XOR,r)
#define _FwdShiftLineOr(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__OR,r)
#define _FwdShiftLineAnd(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__AND,r)
#define _RevShiftLine(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__CPY,l)
#define _RevShiftLineXor(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__XOR,l)
#define _RevShiftLineOr(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__OR,l)
#define _RevShiftLineAnd(d,s,sft,m,w) __SHIFTLINE__(d,s,sft,m,w,__AND,l)
#endif /* __GNUC__ */
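/*
 * A hypothetical driver-level wrapper over the macros above (a sketch
 * only; the wrapper name and its argument conventions are assumptions,
 * not part of this header -- under Turbo C dst/src would be far
 * pointers):
 *
 *    static void copy_row(dst,src,masks,width,shiftctl)
 *    char *dst,*src;
 *    unsigned masks,width,shiftctl;
 *    {
 *        if(shiftctl == 0) _CopyMskLine(dst,src,masks,width);
 *        else              _FwdShiftLine(dst,src,shiftctl,masks,width);
 *    }
 *
 * The Xor/Or/And variants of the same entry points would be selected
 * instead when the write mode is not a plain copy.
 */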
#endif /* whole file */