        * = $0010

fill
        ;x = x2
        ;y = y2
        lda #$f8
        sax <f_jmp+1            ;set initially, as it is not set on every turn later on
f_back                          ;common entry point where all code segments reenter when done
        dey
f_yend  cpy #$00                ;forces carry to be set \o/
        bcc f_end
f_err   lda #$00                ;restore error
f_dx1   sbc #$00                ;do that bresenhamthingy for xend, code will be setup for either flat or steep slope
f_code  bcs +
        ;inx                    ;in case of flat slopes
bcs_start
        dex
        ;bcs * - 3
f_dx2   adc #$00
        sta <f_err+1
        lda #$f8
        sax <f_jmp+1            ;update start of span, depending on bit 7 stuff is rendered to buffer 1 or 2 (offset of $80 in the table)
        bne ++                  ;so buttugly, but need to skip
bcs_end
+
        sta <f_err+1            ;save error
++
        lda xstart,y            ;load previously calced x1
        sta <f_msk+1            ;setup mask without tainting X
        arr #$78                ;-> carry is still set, bit 7 always cleared. This way we generate values from $80 .. $bc, a range to which we adopt the memory layout of the row tables
        sta <f_jmp+2            ;update byte of jump responsible to select all code-segments that start with xstart
f_patt  lda patt_0,y            ;fetch pattern
f_msk   and maskl               ;apply mask for left edge
f_jmp   jmp ($1000)             ;do it! \o/
f_end   rts
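
For orientation, the control flow of the loop above can be sketched in C. This is only a rough model of the steep-slope case under assumed names (xstart, patt_0, draw_span, the dx/dy/xstep parameters are illustrative); the real routine keeps the error term and slope in self-modified immediates (f_err/f_dx1/f_dx2) and, instead of calling a function, assembles the jmp target from xstart (high byte, via arr) and xend (low byte, via sax) to enter pre-generated span code directly.

    /* Rough C model of the fill loop -- names and parameters are assumptions
     * for illustration only, not the routine's real interface.               */
    #include <stdint.h>

    extern uint8_t xstart[256];    /* previously calced left edge per scanline */
    extern uint8_t patt_0[256];    /* fill pattern per scanline                */
    extern void draw_span(uint8_t y, uint8_t x1, uint8_t x2, uint8_t pat);

    void fill(uint8_t y_end, uint8_t y,        /* y enters as y2               */
              uint8_t x2, int8_t xstep,        /* xstep = +1/-1 (inx or dex)   */
              uint8_t dx, uint8_t dy)          /* steep case: dx <= dy         */
    {
        int16_t err = dy / 2;
        while (y-- != y_end) {                 /* dey / f_yend cpy / bcc f_end */
            err -= dx;                         /* f_dx1: advance the error     */
            if (err < 0) {                     /* carry cleared -> step xend   */
                x2 += xstep;                   /* inx or dex, patched per slope*/
                err += dy;                     /* f_dx2: rewrap the error      */
            }
            draw_span(y, xstart[y], x2, patt_0[y]);  /* f_jmp into speedcode   */
        }
    }

The jmp lands in one of those generated code segments; each segment renders a complete span and jumps back to f_back. The segment below is the variant covering eight byte columns:
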
        sta .addr,y             ;write through, smart poly order avoids clashes
        lda (f_patt+1),y        ;refetch pattern, expensive, but at least less than sta patt, lda patt
        sta .addr + $080,y
        sta .addr + $100,y
        sta .addr + $180,y
        sta .addr + $200,y
        sta .addr + $280,y
        sta .addr + $300,y
        and maskr,x             ;right edge
        ora .addr + $380,y      ;need to ora here
        sta .addr + $380,y
        jmp f_back
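
In C terms, such a segment does roughly the following: the left column is written straight through (the caller already ANDed the pattern with the left-edge mask before jumping in), the middle columns get the raw, re-fetched pattern, and the right column is ORed in under the right-edge mask so pixels already set in that byte survive. The buffer name and the $80 column stride below are assumptions read off the .addr offsets above:

    /* Rough C model of one generated span segment (the 8-column case above).
     * 'left' is the pattern already masked by the caller; 'buf' and the 0x80
     * stride mirror the .addr layout.  All names are illustrative only.      */
    #include <stdint.h>

    extern uint8_t maskr[256];                 /* right-edge masks, indexed by xend */

    void span_8cols(uint8_t *buf, uint8_t y, uint8_t left, uint8_t pat, uint8_t xend)
    {
        buf[0x000 + y] = left;                 /* sta .addr,y -- plain write-through */
        buf[0x080 + y] = pat;                  /* middle columns: unmasked pattern   */
        buf[0x100 + y] = pat;
        buf[0x180 + y] = pat;
        buf[0x200 + y] = pat;
        buf[0x280 + y] = pat;
        buf[0x300 + y] = pat;
        buf[0x380 + y] |= pat & maskr[xend];   /* and maskr,x / ora / sta: right edge */
    }

The re-fetch through (f_patt+1),y is what the comment calls expensive, but as noted it is still cheaper than stashing and reloading the pattern around the left-edge masking.
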
f_err   lda #$00
        pla
        sta <f_err+1
        pha