Initial Commit
This commit is contained in:
commit
1b87504c6d
|
@ -0,0 +1,40 @@
|
|||
# Created by https://www.gitignore.io/api/go,linux
|
||||
|
||||
### Go ###
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
### Go Patch ###
|
||||
/vendor/
|
||||
/Godeps/
|
||||
|
||||
### Linux ###
|
||||
*~
|
||||
|
||||
# temporary files which can be created if a process still has a handle open of a deleted file
|
||||
.fuse_hidden*
|
||||
|
||||
# KDE directory preferences
|
||||
.directory
|
||||
|
||||
# Linux trash folder which might appear on any partition or disk
|
||||
.Trash-*
|
||||
|
||||
# .nfs files are created when an open file is removed but is still being accessed
|
||||
.nfs*
|
||||
|
||||
# End of https://www.gitignore.io/api/go,linux
|
||||
|
||||
### Visual Studio Code ###
|
||||
/.vscode
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
dec-decoder: an iso.dec decoder written in Go
|
||||
==============================================
|
||||
|
||||
This tool implements the NASOS method of decoding .iso.dec files
|
||||
back into plain .iso files.
|
||||
|
||||
History
|
||||
-------
|
||||
|
||||
This tool is also written in Go and born of a frustration that:
|
||||
1. the NASOS tool is being used to archive isos
|
||||
2. the original NASOS tool is written as a .NET GUI tool for Windows
|
||||
3. it is hard to find a canonical site for getting the tool
|
||||
4. It requires downloading unknown binaries
|
||||
|
||||
This is a pure Go implementation that documents how this works and
|
||||
should work on all platforms supported by Go. This alleviates all the
|
||||
concerns above.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
You need a working [Go](https://golang.org/) installation (I used Go 1.10.1 on Ubuntu Linux 18.04)
|
||||
|
||||
You will need to install the required libraries:
|
||||
|
||||
go get github.com/jessevdk/go-flags
|
||||
|
||||
You can then build the tool by:
|
||||
|
||||
go install
|
||||
|
||||
Usage
|
||||
-----
|
||||
dec-decode [OPTIONS] Files...
|
||||
|
||||
Application Options:
|
||||
-v, --verbose show lots more information than is probably necessary
|
||||
|
||||
Help Options:
|
||||
-h, --help Show this help message
|
||||
|
||||
Arguments:
|
||||
Files: list of files to decode
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
)
|
||||
|
||||
// opts holds the command-line options parsed by go-flags in main.
var opts struct {
	// Verbose enables the extra diagnostic output emitted via vLog.
	Verbose bool `short:"v" long:"verbose" description:"show lots more information than is probably necessary"`
	// Positional captures the required list of .iso.dec files to decode.
	Positional struct {
		Files []string `description:"list of files to decode" required:"true"`
	} `positional-args:"true" required:"true"`
}
|
||||
|
||||
func main() {
|
||||
_, err := flags.Parse(&opts)
|
||||
if err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, filePath := range opts.Positional.Files {
|
||||
fin, err := os.Open(filePath)
|
||||
errorExit(err)
|
||||
defer fin.Close()
|
||||
|
||||
fileName := path.Base(filePath)
|
||||
var outPath string
|
||||
if path.Ext(fileName) == ".dec" {
|
||||
outPath = path.Join(".", strings.TrimRight(fileName, ".dec"))
|
||||
} else {
|
||||
outPath = path.Join(".", fileName+".iso")
|
||||
}
|
||||
//outPath += ".dmp"
|
||||
|
||||
signature := readSignature(fin)
|
||||
fin.Seek(0, io.SeekStart)
|
||||
switch signature {
|
||||
case "GCML":
|
||||
case "GCMM":
|
||||
decodeGameCube(fin, outPath)
|
||||
case "WII5":
|
||||
decodeWii(fin, outPath, 0x1182800) //18360320
|
||||
case "WII9":
|
||||
decodeWii(fin, outPath, 0x1FB5000) //33247232
|
||||
default:
|
||||
fmt.Printf("Unknown filetype: %s when checking file %s\n", signature, fileName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readSignature(r io.Reader) string {
|
||||
buffer := make([]byte, 4)
|
||||
_, err := io.ReadFull(r, buffer)
|
||||
errorExit(err)
|
||||
return string(buffer)
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// decodeGameCube reconstructs a plain GameCube .iso at outPath from the
// NASOS .dec file r. The .dec header holds a table of 32-bit entries,
// one per 2 KiB output block: a real entry is the source offset of kept
// data (stored >> 8), while 0xffffffff marks a block of deterministic
// junk that is regenerated locally. The rebuilt image is MD5-checked
// against the hash stored in the header.
func decodeGameCube(r *os.File, outPath string) {
	// The fixed-size header/table region at the start of the .dec file.
	startSector := make([]byte, 0x2B8800)
	_, err := io.ReadFull(r, startSector)
	errorExit(err)

	startBuffer := bytes.NewBuffer(startSector)

	sig := startBuffer.Next(4)       // "GCML"/"GCMM" magic
	id := startBuffer.Next(4)        // disc ID, seeds the junk generator
	hashValue := startBuffer.Next(16) // MD5 of the original .iso
	startBuffer.Next(6)              // skipped header bytes
	discNumber, err := startBuffer.ReadByte()
	errorExit(err)

	fmt.Println("GameCube Disc")
	fmt.Printf("Signature: %s\n", string(sig))
	fmt.Printf("ID: %s\n", string(id))
	fmt.Printf("MD5: %x\n", hashValue)

	w, err := os.Create(outPath)
	errorExit(err)
	defer w.Close()

	bytesWritten := uint64(0)

	// Junk data is generated in 0x40000-byte (256 KiB) blocks; padOffset
	// tracks how far into the current block the output position is.
	padBlock := uint32(0)
	padOffset := uint64(0)
	padding := generatePaddingBlock(padBlock, id, uint32(discNumber))

	transfer := make([]byte, 2048)

	hash := md5.New()
	fmt.Printf("Writing Disc Data.....")
	// 712880 blocks x 2048 bytes = 1,459,978,240 bytes, the full
	// GameCube mini-DVD image size.
	for i := 0; i < 712880; i++ {
		if padOffset == 0x40000 {
			padBlock++
			padding = generatePaddingBlock(padBlock, id, uint32(discNumber))
			padOffset = 0
		}

		rawOffset := binary.LittleEndian.Uint32(startBuffer.Next(4))
		if rawOffset != 0xffffffff {
			// Real data block: table stores the source offset >> 8.
			offset := rawOffset << 8
			checkOffset(r, offset)
			r.Seek(int64(offset), io.SeekStart)
			wrote := uint64(blockTransferWithHash(r, w, transfer, hash))
			bytesWritten += wrote
			padOffset += wrote
		} else {
			// Junk block: emit 2 KiB from the regenerated padding.
			slice := padding[padOffset : padOffset+2048]
			io.Copy(hash, bytes.NewBuffer(slice))
			_, err = w.Write(slice)
			errorExit(err)
			bytesWritten += 2048
			padOffset += 2048
		}
	}
	fmt.Println("Done")

	// Verify the rebuilt image against the MD5 recorded in the header.
	calcValue := hash.Sum(nil)
	if reflect.DeepEqual(hashValue, calcValue) {
		fmt.Printf("Decode OK: %x\n", hashValue)
	} else {
		fmt.Printf("Decode FAIL: expected: %x calculated: %x\n", hashValue, calcValue)
	}
}
|
|
@ -0,0 +1,82 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
)
|
||||
|
||||
// generatePaddingBlock deterministically regenerates one 0x40000-byte
// (256 KiB) block of the junk data that NASOS strips from an image,
// seeded from the 4-byte disc ID, the disc number, and the block index.
func generatePaddingBlock(blockcount uint32, ID []byte, discnumber uint32) []byte {
	// Shift-register state; calcBlock/xorBlock use the first 521 words
	// (the slice is oversized at 2084 entries).
	buffer := make([]uint32, 2084)
	array := make([]byte, 0x40000)
	num := 0
	num2 := uint32(0)
	blockcount = blockcount * 8 * 0x1EF29123
	for i := 0; i != 0x40000; i += 4 {
		// Re-seed the generator at the start of every 0x8000-byte chunk.
		if (i & 0x7FFF) == 0 {
			x1 := ((uint32(ID[2]) << 8) | uint32(ID[1])) << 16
			x2 := (uint32(ID[3]) + uint32(ID[2])) << 8
			num2 = x1 | x2 | uint32(ID[0]+ID[1]) // byte add wraps intentionally

			num2 = (((num2 ^ discnumber) * 0x260BCD5) ^ blockcount)
			calcBlock(num2, buffer)
			num = 520
			blockcount += 0x1EF29123
		}
		num++
		// When the 521-word state is exhausted, advance the generator.
		if num == 521 {
			xorBlock(buffer)
			num = 0
		}
		// NOTE(review): the shift pattern 24/18/8/0 is asymmetric — byte 1
		// uses >>18 where a plain big-endian split would use >>16. This may
		// faithfully replicate a quirk of the original NASOS tool; confirm
		// against the reference implementation before "fixing" it.
		array[i] = byte(buffer[num] >> 24)
		array[i+1] = byte(buffer[num] >> 18)
		array[i+2] = byte(buffer[num] >> 8)
		array[i+3] = byte(buffer[num])
	}
	return array
}
|
||||
|
||||
// calcBlock seeds the 521-word shift-register state in buffer from
// sample and warms it up. buffer must have at least 521 elements.
func calcBlock(sample uint32, buffer []uint32) {
	num := uint32(0)
	// Build the first 17 words one bit at a time, taking bit 31 of a
	// linear-congruential generator each iteration.
	for i := 0; i != 17; i++ {
		for j := 0; j < 32; j++ {
			sample *= 1566083941
			sample++
			// Shift num right one bit and set its top bit from bit 31 of
			// sample (-2147483648 is 0x80000000 sign-extended to int, so
			// the AND isolates that single bit).
			num = uint32(int(num>>1) | (int(sample) & -2147483648))
		}
		buffer[i] = num
	}
	buffer[16] ^= ((buffer[0] >> 9) ^ (buffer[16] << 23))
	// Expand the 17 seed words into the full 521-word state.
	for i := 1; i != 505; i++ {
		buffer[i+16] = ((buffer[i-1] << 23) ^ (buffer[i] >> 9) ^ buffer[i+15])
	}
	// Warm up: advance the generator three times before first use.
	for i := 0; i < 3; i++ {
		xorBlock(buffer)
	}
}
|
||||
|
||||
// xorBlock advances the 521-word shift-register state in place: the
// first 32 words are folded with the words 489 positions ahead, and
// every remaining word with the word 32 positions behind it.
func xorBlock(buffer []uint32) {
	for i := 0; i < 32; i++ {
		buffer[i] ^= buffer[i+489]
	}
	for i := 32; i < 521; i++ {
		buffer[i] ^= buffer[i-32]
	}
}
|
||||
|
||||
// getIV returns a copy of the 16-byte AES IV stored at offset 976
// (0x3D0) of a Wii sector hash block.
func getIV(hashBlock []byte) []byte {
	iv := make([]byte, 16)
	copy(iv, hashBlock[976:992])
	return iv
}
|
||||
|
||||
func encodeAES(p []byte, key []byte, iv []byte) []byte {
|
||||
block, err := aes.NewCipher(key)
|
||||
errorExit(err)
|
||||
mode := cipher.NewCBCEncrypter(block, iv)
|
||||
mode.CryptBlocks(p, p)
|
||||
|
||||
return p
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
// errorExit aborts the program with a fatal log message when err is
// non-nil; it is a no-op otherwise.
func errorExit(err error) {
	if err == nil {
		return
	}
	log.Fatal("ERROR:", err)
}
|
||||
|
||||
func vLog(f string, v ...interface{}) {
|
||||
if opts.Verbose {
|
||||
log.Printf(f, v...)
|
||||
}
|
||||
}
|
||||
|
||||
func position(s io.Seeker) (pos int64, err error) {
|
||||
return s.Seek(0, io.SeekCurrent)
|
||||
}
|
||||
|
||||
func checkPosition(s io.Seeker) int64 {
|
||||
pos, err := position(s)
|
||||
errorExit(err)
|
||||
return pos
|
||||
}
|
||||
|
||||
func checkOffset(s io.Seeker, offset uint32) {
|
||||
pos := checkPosition(s)
|
||||
if pos != int64(offset) {
|
||||
vLog("READ: position: %x expected: %x", pos, offset)
|
||||
}
|
||||
}
|
||||
|
||||
// byteProvider is the subset of *bytes.Buffer the decoders consume: it
// hands out the next n bytes of an in-memory offset table.
type byteProvider interface {
	Next(n int) []byte
}
|
||||
|
||||
func readNextOffset(s io.Seeker, p byteProvider) uint32 {
|
||||
offset := binary.LittleEndian.Uint32(p.Next(4)) << 8
|
||||
checkOffset(s, offset)
|
||||
return offset
|
||||
}
|
||||
|
||||
func setNextOffset(s io.Seeker, p byteProvider) {
|
||||
offset := readNextOffset(s, p)
|
||||
s.Seek(int64(offset), io.SeekStart)
|
||||
}
|
||||
|
||||
func blockTransfer(r io.Reader, w io.Writer, buffer []byte) int {
|
||||
_, err := io.ReadFull(r, buffer)
|
||||
errorExit(err)
|
||||
wrote, err := w.Write(buffer)
|
||||
errorExit(err)
|
||||
return wrote
|
||||
}
|
||||
|
||||
func blockTransferWithHash(r io.Reader, w io.Writer, buffer []byte, hash hash.Hash) int {
|
||||
wrote := blockTransfer(r, w, buffer)
|
||||
io.Copy(hash, bytes.NewBuffer(buffer))
|
||||
return wrote
|
||||
}
|
|
@ -0,0 +1,177 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// partitionInfo describes one Wii disc partition as recorded in the
// .iso.dec header. Offsets and sizes are byte values; the header stores
// them as 32-bit words shifted right by 2, and decodeWii shifts them
// back (<< 2) when populating this struct.
type partitionInfo struct {
	PartitionType      uint   // NOTE(review): never assigned in this file — confirm intended use
	PartitionOffset    uint64 // absolute byte offset where the partition begins
	PartitionEndOffset uint64 // absolute byte offset where the partition ends
	DataOffset         uint64 // byte offset of the partition's encrypted data area
	DataSize           uint64 // size in bytes of the encrypted data area
	PartitionKey       []byte // 16-byte AES key used to re-encrypt partition data
}
|
||||
|
||||
// decodeWii reconstructs a plain Wii .iso at outPath from the NASOS
// .dec file r. sectorSize is the size of the header/table region
// (different for WII5 single-layer and WII9 dual-layer images). The
// header holds partition descriptors plus a table of 32-bit entries
// (offset >> 8, or 0xffffffff for regenerated junk). Partition data is
// rebuilt in 0x8000-byte Wii sectors — a 0x400-byte hash block written
// as-is, followed by 31 KiB (0x7C00) of data re-encrypted with AES-CBC
// using the partition key and an IV taken from the hash block — and the
// whole image is MD5-checked against the hash in the header.
func decodeWii(r *os.File, outPath string, sectorSize uint) {
	startSector := make([]byte, sectorSize)
	_, err := io.ReadFull(r, startSector)
	errorExit(err)

	startBuffer := bytes.NewBuffer(startSector)

	sig := startBuffer.Next(4)        // "WII5"/"WII9" magic
	id := startBuffer.Next(4)         // disc ID, seeds the junk generator
	hashValue := startBuffer.Next(16) // MD5 of the original .iso
	numPartitions := binary.LittleEndian.Uint32(startBuffer.Next(4))
	var partitions = make([]partitionInfo, numPartitions)

	fmt.Println("Wii Disc")
	fmt.Printf("Signature: %s\n", string(sig))
	fmt.Printf("ID: %s\n", string(id))
	fmt.Printf("MD5: %x\n", hashValue)

	// Read each partition descriptor; stored values are byte offsets >> 2.
	for i := uint32(0); i < numPartitions; i++ {
		partitions[i].DataOffset = uint64(binary.LittleEndian.Uint32(startBuffer.Next(4))) << 2
		partitions[i].DataSize = uint64(binary.LittleEndian.Uint32(startBuffer.Next(4))) << 2
		partitions[i].PartitionOffset = uint64(binary.LittleEndian.Uint32(startBuffer.Next(4))) << 2
		partitions[i].PartitionEndOffset = uint64(binary.LittleEndian.Uint32(startBuffer.Next(4))) << 2
		partitions[i].PartitionKey = startBuffer.Next(16)

		fmt.Printf("Partition %d of %d\n", i+1, numPartitions)
		fmt.Printf("--------------------\n")
		fmt.Printf("Data Offset: 0x%x\n", partitions[i].DataOffset)
		fmt.Printf("Data Size: 0x%x\n", partitions[i].DataSize)
		fmt.Printf("Partition Offset: 0x%x\n", partitions[i].PartitionOffset)
		fmt.Printf("Partition End: 0x%x\n", partitions[i].PartitionEndOffset)
		fmt.Printf("Partition Key: 0x%x\n", partitions[i].PartitionKey)
		fmt.Printf("====================\n")
	}

	w, err := os.Create(outPath)
	errorExit(err)
	defer w.Close()

	bytesWritten := uint64(0)

	transfer := make([]byte, 1024)

	hash := md5.New()
	// Plain (unencrypted) disc header up to the first partition, copied
	// 1 KiB at a time.
	fmt.Print("Writing Disc Header...")
	for i := uint64(0); i < partitions[0].PartitionOffset; i += 1024 {
		readNextOffset(r, startBuffer) // we don't need this
		bytesWritten += uint64(blockTransferWithHash(r, w, transfer, hash))
		vLog("\nWRITE: offset: %x\n", bytesWritten)
	}
	fmt.Println("Done")

	for j := uint32(0); j < numPartitions; j++ {
		// Partition header (ticket/TMD area) is copied as-is.
		fmt.Printf("Writing Partition %d Header...", j)
		for i := uint64(0); i < partitions[j].DataOffset; i += 1024 {
			setNextOffset(r, startBuffer)
			bytesWritten += uint64(blockTransferWithHash(r, w, transfer, hash))
			vLog("\nWRITE: offset: %x\n", bytesWritten)
		}
		fmt.Println("Done")

		padBlock := uint32(0)
		padOffset := uint64(0)
		dataSize := uint64(0)
		padding := make([]byte, 0x40000)

		// Encrypted data area: per 0x8000-byte Wii sector, write the
		// 0x400-byte hash block plainly, then gather 31 x 1 KiB of data,
		// encrypt, and write.
		fmt.Printf("Writing Partition %d Data.....", j)
		for dataSize < partitions[j].DataSize {
			// This transfer is the sector's hash block.
			setNextOffset(r, startBuffer)

			wrote := uint64(blockTransferWithHash(r, w, transfer, hash))
			bytesWritten += wrote
			dataSize += wrote
			vLog("\nWRITE: offset: %x\n", bytesWritten)

			writeBuffer := NewFixedRecord(0x7C00)
			transfer2 := make([]byte, 1024)

			// Assemble the sector's 0x7C00 data bytes from 31 table entries.
			for k := 0; k < 31; k++ {
				// Regenerate junk every 0x40000 bytes of padding consumed.
				if (padOffset & 0x3FFFF) == 0 {
					vLog("\nPADDING: block: %d id: %s\n", padBlock, id)
					padding = generatePaddingBlock(padBlock, id, 0)
					padBlock++
					padOffset = 0
				}

				rawOffset := binary.LittleEndian.Uint32(startBuffer.Next(4))
				if rawOffset != 0xffffffff {
					// Kept data: seek to the stored source offset (>> 8).
					offset := rawOffset << 8
					checkOffset(r, offset)
					r.Seek(int64(offset), io.SeekStart)

					padOffset += uint64(blockTransfer(r, writeBuffer, transfer2))
					vLog("\ntfr %d poffset: %x buffer: %x\n", k, padOffset, writeBuffer.Len())
				} else {
					// Junk: take 1 KiB from the regenerated padding block.
					slice := padding[padOffset : padOffset+1024]
					_, err = writeBuffer.Write(slice)
					errorExit(err)
					padOffset += 1024
					vLog("\npad %d poffset: %x buffer: %x\n", k, padOffset, writeBuffer.Len())
				}
			}
			// Re-encrypt the sector data; the CBC IV lives inside the hash
			// block just written (bytes 976..991, i.e. offset 0x3D0).
			iv := getIV(transfer)
			output := writeBuffer.Record()
			encodeAES(output, partitions[j].PartitionKey, iv)
			io.Copy(hash, bytes.NewBuffer(output))
			_, err = w.Write(output)
			errorExit(err)
			bytesWritten += 0x7C00
			vLog("\nWRITE: offset: %x\n", bytesWritten)

			dataSize += 0x7C00
			vLog("\nDATA SIZE: %d / %d\n", dataSize, partitions[j].DataSize)
		}
		fmt.Println("Done")

		// Fill from the end of the data area to the partition end with
		// either kept data or regenerated junk, unencrypted.
		fmt.Printf("Writing Partition %d Fill.....", j)
		padOffset = 0
		padBlock = uint32(bytesWritten / 0x40000)
		for bytesWritten != partitions[j].PartitionEndOffset {
			if (padOffset & 0x3FFFF) == 0 {
				vLog("\ngenerate padding: %d %s\n", padBlock, id)
				padding = generatePaddingBlock(padBlock, id, 0)
				padBlock++
				padOffset = 0
			}

			rawOffset := binary.LittleEndian.Uint32(startBuffer.Next(4))
			if rawOffset != 0xffffffff {
				offset := rawOffset << 8
				checkOffset(r, offset)
				r.Seek(int64(offset), io.SeekStart)

				wrote := uint64(blockTransferWithHash(r, w, transfer, hash))
				bytesWritten += wrote
				padOffset += wrote
				vLog("\ntfr WRITE: offset: %x\n", bytesWritten)
			} else {
				slice := padding[padOffset : padOffset+1024]
				io.Copy(hash, bytes.NewBuffer(slice))
				_, err = w.Write(slice)
				errorExit(err)
				bytesWritten += 1024
				padOffset += 1024
				vLog("\npad WRITE: offset: %x\n", bytesWritten)
			}
		}
		fmt.Println("Done")
	}
	// Verify the rebuilt image against the MD5 recorded in the header.
	calcValue := hash.Sum(nil)
	if reflect.DeepEqual(hashValue, calcValue) {
		fmt.Printf("Decode OK: %x\n", hashValue)
	} else {
		fmt.Printf("Decode FAIL: expected: %x calculated: %x\n", hashValue, calcValue)
	}
}
|
|
@ -0,0 +1,72 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FixedRecord provides a fixed-size storage buffer that implements
// io.Writer. Unlike bytes.Buffer it never grows: a write that would
// overflow the capacity fails with an error and writes nothing, and
// Record returns the full-capacity backing slice (written bytes
// followed by the untouched zero tail).
type FixedRecord interface {
	// Record returns the entire backing slice, including the unwritten tail.
	Record() []byte
	// Size returns the fixed capacity in bytes.
	Size() int
	// Bytes returns only the bytes written so far.
	Bytes() []byte
	// Len returns the number of bytes written so far.
	Len() int
	Write(p []byte) (n int, err error)
	WriteString(s string) (n int, err error)
	WriteByte(c byte) error
	WriteRune(r rune) (n int, err error)
}

// internalFixedRecord is the sole implementation of FixedRecord.
type internalFixedRecord struct {
	buf []byte // fixed backing storage
	off int    // number of bytes written so far
}

func (b *internalFixedRecord) Record() []byte { return b.buf }

func (b *internalFixedRecord) Size() int { return len(b.buf) }

func (b *internalFixedRecord) Bytes() []byte { return b.buf[0:b.off] }

func (b *internalFixedRecord) Len() int { return b.off }

// Write appends p to the buffer; it fails without writing anything if p
// does not fit in the remaining space.
func (b *internalFixedRecord) Write(p []byte) (n int, err error) {
	if b.off+len(p) > b.Size() {
		return 0, errors.New("too much data to write")
	}
	n = copy(b.buf[b.off:], p)
	b.off += n
	return n, nil
}

// WriteString appends s; it fails without writing anything if s does
// not fit in the remaining space.
func (b *internalFixedRecord) WriteString(s string) (n int, err error) {
	if b.off+len(s) > b.Size() {
		return 0, errors.New("too much data to write")
	}
	n = copy(b.buf[b.off:], s)
	b.off += n
	return n, nil
}

// WriteByte appends a single byte; it fails if the buffer is full.
func (b *internalFixedRecord) WriteByte(c byte) error {
	if b.off+1 > b.Size() {
		return errors.New("too much data to write")
	}
	b.buf[b.off] = c
	b.off++
	return nil
}

// WriteRune appends the UTF-8 encoding of r; it fails without writing
// anything if the encoding does not fit in the remaining space.
func (b *internalFixedRecord) WriteRune(r rune) (n int, err error) {
	if b.off+utf8.RuneLen(r) > b.Size() {
		return 0, errors.New("too much data to write")
	}
	// BUG FIX: the original sliced b.buf[b.off : b.off+utf8.UTFMax], which
	// panics with "slice bounds out of range" whenever fewer than UTFMax
	// (4) bytes remain, even though the rune itself fits. EncodeRune only
	// needs RuneLen(r) bytes, which the check above guarantees.
	n = utf8.EncodeRune(b.buf[b.off:], r)
	b.off += n
	return n, nil
}

// NewFixedRecord creates a write buffer with a given fixed size.
func NewFixedRecord(size int) FixedRecord {
	return &internalFixedRecord{buf: make([]byte, size)}
}
|
Loading…
Reference in New Issue