diff --git a/.gitignore b/.gitignore index 5be2b41..4cc2015 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ Thumbs.db .idea/ cover* test +*.swp diff --git a/.golangci.json b/.golangci.json new file mode 100644 index 0000000..8a6c6a8 --- /dev/null +++ b/.golangci.json @@ -0,0 +1,56 @@ +{ + "run": { + "tests": false, + "timeout": "5m" + }, + "linters": { + "enable": [ + "asciicheck", + "bodyclose", + "deadcode", + "depguard", + "dogsled", + "dupl", + "durationcheck", + "errcheck", + "errname", + "errorlint", + "exportloopref", + "forcetypeassert", + "funlen", + "gocognit", + "gocritic", + "gocyclo", + "godot", + "goerr113", + "gofumpt", + "goprintffuncname", + "gosimple", + "govet", + "ineffassign", + "lll", + "misspell", + "nakedret", + "nestif", + "noctx", + "nolintlint", + "revive", + "rowserrcheck", + "staticcheck", + "structcheck", + "testpackage", + "typecheck", + "unconvert", + "unparam", + "unused", + "varcheck", + "whitespace" + ] + }, + "linters-settings": { + "funlen": {}, + "gocritic": { + "disabled-checks": ["commentFormatting"] + } + } +} diff --git a/binary.go b/binary.go new file mode 100644 index 0000000..c903c14 --- /dev/null +++ b/binary.go @@ -0,0 +1,48 @@ +package astits + +import ( + "io" + "log" + + "github.com/icza/bitio" +) + +// WriterAndByteWriter An io.Writer and io.ByteWriter at the same time. +type WriterAndByteWriter interface { + io.Writer + io.ByteWriter +} + +// ReaderAndByteReader An io.Reader and io.ByteReader at the same time. +type ReaderAndByteReader interface { + io.Reader + io.ByteReader +} + +// WriteBinary . +func WriteBinary(w *bitio.Writer, str string) error { + for _, r := range str { + var err error + + switch r { + case '1': + err = w.WriteBool(true) + case '0': + err = w.WriteBool(false) + default: + log.Fatalf("invalid rune: %v", r) + } + + if err != nil { + return err + } + } + return nil +} + +// TryReadFull . 
+func TryReadFull(r *bitio.CountReader, p []byte) { + if r.TryError == nil { + _, r.TryError = io.ReadFull(r, p) + } +} diff --git a/binary_test.go b/binary_test.go new file mode 100644 index 0000000..aca22cf --- /dev/null +++ b/binary_test.go @@ -0,0 +1,137 @@ +package astits + +import ( + "bytes" + "fmt" + "io" + "testing" + + "github.com/icza/bitio" + "github.com/stretchr/testify/require" +) + +func TestBitsWriter(t *testing.T) { + // TODO Need to test LittleEndian + bw := &bytes.Buffer{} + w := bitio.NewWriter(bw) + + err := WriteBinary(w, "000000") + require.NoError(t, err) + require.Equal(t, 0, bw.Len()) + + err = w.WriteBool(false) + require.NoError(t, err) + + err = w.WriteBool(true) + require.Equal(t, []byte{1}, bw.Bytes()) + + _, err = w.Write([]byte{2, 3}) + require.NoError(t, err) + require.Equal(t, []byte{1, 2, 3}, bw.Bytes()) + + err = w.WriteBits(uint64(4), 8) + require.NoError(t, err) + require.Equal(t, []byte{1, 2, 3, 4}, bw.Bytes()) + + err = w.WriteBits(uint64(5), 16) + require.NoError(t, err) + require.Equal(t, []byte{1, 2, 3, 4, 0, 5}, bw.Bytes()) + + err = w.WriteBits(uint64(6), 32) + require.NoError(t, err) + require.Equal(t, []byte{1, 2, 3, 4, 0, 5, 0, 0, 0, 6}, bw.Bytes()) + + err = w.WriteBits(uint64(7), 64) + require.NoError(t, err) + require.Equal(t, []byte{1, 2, 3, 4, 0, 5, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7}, bw.Bytes()) + + bw.Reset() + err = w.WriteBits(uint64(4), 3) + require.NoError(t, err) + + err = w.WriteBits(uint64(4096), 13) + require.NoError(t, err) + require.Equal(t, []byte{144, 0}, bw.Bytes()) +} + +// testLimitedWriter is an implementation of io.Writer +// with max write size limit to test error handling +type testLimitedWriter struct { + BytesLimit int +} + +func (t *testLimitedWriter) Write(p []byte) (n int, err error) { + t.BytesLimit -= len(p) + if t.BytesLimit >= 0 { + return len(p), nil + } + return len(p) + t.BytesLimit, io.EOF +} + +func (t *testLimitedWriter) WriteByte(c byte) error { + _, err := t.Write([]byte{c}) + return err +} + +func BenchmarkBitsWriter_Write(b *testing.B) { + benchmarks := []func(*bitio.Writer){ + func(w *bitio.Writer) { WriteBinary(w, "000000") }, + func(w *bitio.Writer) { w.WriteBool(false) }, + func(w *bitio.Writer) { w.WriteBool(true) }, + func(w *bitio.Writer) { w.Write([]byte{2, 3}) }, + func(w *bitio.Writer) { w.WriteByte(uint8(4)) }, + func(w *bitio.Writer) { w.WriteBits(uint64(5), 16) }, + func(w *bitio.Writer) { w.WriteBits(uint64(6), 32) }, + func(w *bitio.Writer) { w.WriteBits(uint64(7), 64) }, + } + + bw := &bytes.Buffer{} + bw.Grow(1024) + w := bitio.NewWriter(bw) + + for i, bm := range benchmarks { + b.Run(fmt.Sprintf("%v", i), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + bw.Reset() + bm(w) + } + }) + } +} + +func BenchmarkBitsWriter_WriteN(b *testing.B) { + type benchData func(w *bitio.Writer) + benchmarks := []benchData{} + var i uint8 + for i = 1; i <= 8; i++ { + benchmarks = append(benchmarks, + func(w *bitio.Writer) { w.WriteBits(0xff, i) }) + } + for i = 1; i <= 16; i++ { + benchmarks = append(benchmarks, + func(w *bitio.Writer) { w.WriteBits(0xffff, i) }) + } + for i = 1; i <= 32; i++ { + benchmarks = append(benchmarks, + func(w *bitio.Writer) { w.WriteBits(0xffffffff, i) }) + } + for i = 1; i <= 64; i++ { + benchmarks = append(benchmarks, + func(w *bitio.Writer) { w.WriteBits(0xffffffffffffffff, i) }) + } + + bw := &bytes.Buffer{} + bw.Grow(1024) + w := bitio.NewWriter(bw) + + for i, bm := range benchmarks { + b.Run(fmt.Sprintf("%v", i), func(b *testing.B) { + 
b.ReportAllocs() + for i := 0; i < b.N; i++ { + bw.Reset() + bm(w) + } + }) + } +} diff --git a/clock_reference.go b/clock_reference.go index 73c147c..e710a11 100644 --- a/clock_reference.go +++ b/clock_reference.go @@ -4,13 +4,13 @@ import ( "time" ) -// ClockReference represents a clock reference -// Base is based on a 90 kHz clock and extension is based on a 27 MHz clock +// ClockReference represents a clock reference. +// Base is based on a 90 kHz clock and extension is based on a 27 MHz clock. type ClockReference struct { Base, Extension int64 } -// newClockReference builds a new clock reference +// newClockReference builds a new clock reference. func newClockReference(base, extension int64) *ClockReference { return &ClockReference{ Base: base, @@ -18,12 +18,12 @@ func newClockReference(base, extension int64) *ClockReference { } } -// Duration converts the clock reference into duration +// Duration converts the clock reference into duration. func (p ClockReference) Duration() time.Duration { return time.Duration(p.Base*1e9/90000) + time.Duration(p.Extension*1e9/27000000) } -// Time converts the clock reference into time +// Time converts the clock reference into time. func (p ClockReference) Time() time.Time { return time.Unix(0, p.Duration().Nanoseconds()) } diff --git a/cmd/astits-es-split/main.go b/cmd/astits-es-split/main.go index 775268d..89e1b5f 100644 --- a/cmd/astits-es-split/main.go +++ b/cmd/astits-es-split/main.go @@ -3,8 +3,10 @@ package main import ( "bufio" "context" + "errors" "flag" "fmt" + "io" "log" "os" "path" @@ -23,7 +25,7 @@ type muxerOut struct { w *bufio.Writer } -func main() { +func main() { // nolint:funlen,gocognit,gocyclo flag.Usage = func() { fmt.Fprintf(flag.CommandLine.Output(), "Split TS file into multiple files each holding one elementary stream") fmt.Fprintf(flag.CommandLine.Output(), "%s INPUT_FILE [FLAGS]:\n", os.Args[0]) @@ -41,7 +43,7 @@ func main() { _, err = os.Stat(*outDir) if !os.IsNotExist(err) { - log.Fatalf("can't write to `%s': already exists", *outDir) + log.Fatalf("can't write to `%s': already exists", *outDir) // nolint:gocritic } if err = os.MkdirAll(*outDir, os.ModePerm); err != nil { @@ -69,7 +71,7 @@ func main() { for { d, err := demux.NextData() if err != nil { - if err == astits.ErrNoMorePackets { + if errors.Is(err, io.EOF) { break } log.Fatalf("%v", err) @@ -81,7 +83,7 @@ func main() { continue } - if d.PMT != nil { + if d.PMT != nil { // nolint:nestif pmts[d.PMT.ProgramNumber] = d.PMT gotAllPMTs = true @@ -191,7 +193,8 @@ func main() { } timeDiff := time.Since(timeStarted) - log.Printf("%d bytes written at rate %.02f mb/s", bytesWritten, (float64(bytesWritten)/1024.0/1024.0)/timeDiff.Seconds()) + log.Printf("%d bytes written at rate %.02f mb/s", bytesWritten, + (float64(bytesWritten)/1024.0/1024.0)/timeDiff.Seconds()) for _, f := range outfiles { if err = f.w.Flush(); err != nil { diff --git a/cmd/astits-probe/main.go b/cmd/astits-probe/main.go index 8591e24..cb408f2 100644 --- a/cmd/astits-probe/main.go +++ b/cmd/astits-probe/main.go @@ -20,7 +20,7 @@ import ( "github.com/pkg/profile" ) -// Flags +// Flags. 
var ( ctx, cancel = context.WithCancel(context.Background()) cpuProfiling = flag.Bool("cp", false, "if yes, cpu profiling is enabled") @@ -30,7 +30,7 @@ var ( memoryProfiling = flag.Bool("mp", false, "if yes, memory profiling is enabled") ) -func main() { +func main() { //nolint:funlen // Init flag.Usage = func() { fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s :\n", os.Args[0]) @@ -53,8 +53,8 @@ func main() { // Build the reader var r io.Reader var err error - if r, err = buildReader(ctx); err != nil { - log.Fatal(fmt.Errorf("astits: parsing input failed: %w", err)) + if r, err = buildReader(); err != nil { + log.Fatal(fmt.Errorf("astits: parsing input failed: %w", err)) // nolint:gocritic } // Make sure the reader is closed properly @@ -63,7 +63,7 @@ func main() { } // Create the demuxer - var dmx = astits.NewDemuxer(ctx, r) + dmx := astits.NewDemuxer(ctx, r) // Switch on command switch cmd { @@ -87,7 +87,7 @@ func main() { // Print switch *format { case "json": - var e = json.NewEncoder(os.Stdout) + e := json.NewEncoder(os.Stdout) e.SetIndent("", " ") if err = e.Encode(pgms); err != nil { log.Fatal(fmt.Errorf("astits: json encoding to stdout failed: %w", err)) @@ -116,10 +116,10 @@ func handleSignals() { }() } -func buildReader(ctx context.Context) (r io.Reader, err error) { +func buildReader() (r io.Reader, err error) { // Validate input - if len(*inputPath) <= 0 { - err = errors.New("use -i to indicate an input path") + if len(*inputPath) == 0 { + err = errors.New("use -i to indicate an input path") // nolint:goerr113 return } @@ -146,7 +146,7 @@ func buildReader(ctx context.Context) (r io.Reader, err error) { err = fmt.Errorf("astits: listening on multicast udp addr %s failed: %w", u.Host, err) return } - c.SetReadBuffer(4096) + c.SetReadBuffer(4096) // nolint:errcheck r = c default: // Open file @@ -157,7 +157,7 @@ func buildReader(ctx context.Context) (r io.Reader, err error) { } r = f } - return + return r, err } func packets(dmx *astits.Demuxer) (err error) { @@ -167,7 +167,7 @@ func packets(dmx *astits.Demuxer) (err error) { for { // Get next packet if p, err = dmx.NextPacket(); err != nil { - if err == astits.ErrNoMorePackets { + if errors.Is(err, io.EOF) { break } err = fmt.Errorf("astits: getting next packet failed: %w", err) @@ -190,7 +190,7 @@ func packets(dmx *astits.Demuxer) (err error) { return nil } -func data(dmx *astits.Demuxer) (err error) { +func data(dmx *astits.Demuxer) (err error) { // nolint:funlen,gocognit,gocyclo // Determine which data to log var logAll, logEIT, logNIT, logPAT, logPES, logPMT, logSDT, logTOT bool if _, ok := dataTypes.Map["all"]; ok { @@ -224,7 +224,7 @@ func data(dmx *astits.Demuxer) (err error) { for { // Get next data if d, err = dmx.NextData(); err != nil { - if err == astits.ErrNoMorePackets { + if errors.Is(err, io.EOF) { break } err = fmt.Errorf("astits: getting next data failed: %w", err) @@ -232,24 +232,29 @@ func data(dmx *astits.Demuxer) (err error) { } // Log data - if d.EIT != nil && (logAll || logEIT) { + switch { + case d.EIT != nil && (logAll || logEIT): log.Printf("EIT: %d\n", d.PID) log.Println(eventsToString(d.EIT.Events)) - } else if d.NIT != nil && (logAll || logNIT) { + + case d.NIT != nil && (logAll || logNIT): log.Printf("NIT: %d\n", d.PID) - } else if d.PAT != nil && (logAll || logPAT) { + + case d.PAT != nil && (logAll || logPAT): log.Printf("PAT: %d\n", d.PID) log.Printf(" Transport Stream ID: %v\n", d.PAT.TransportStreamID) log.Println(" Programs:") for _, p := range d.PAT.Programs { log.Printf(" %+v\n", p) } 
- } else if d.PES != nil && (logAll || logPES) { + + case d.PES != nil && (logAll || logPES): log.Printf("PES: %d\n", d.PID) log.Printf(" Stream ID: %v\n", d.PES.Header.StreamID) log.Printf(" Packet Length: %v\n", d.PES.Header.PacketLength) log.Printf(" Optional Header: %+v\n", d.PES.Header.OptionalHeader) - } else if d.PMT != nil && (logAll || logPMT) { + + case d.PMT != nil && (logAll || logPMT): log.Printf("PMT: %d\n", d.PID) log.Printf(" ProgramNumber: %v\n", d.PMT.ProgramNumber) log.Printf(" PCR PID: %v\n", d.PMT.PCRPID) @@ -261,25 +266,27 @@ func data(dmx *astits.Demuxer) (err error) { for _, d := range d.PMT.ProgramDescriptors { log.Printf(" %+v\n", d) } - } else if d.SDT != nil && (logAll || logSDT) { + + case d.SDT != nil && (logAll || logSDT): log.Printf("SDT: %d\n", d.PID) - } else if d.TOT != nil && (logAll || logTOT) { + + case d.TOT != nil && (logAll || logTOT): log.Printf("TOT: %d\n", d.PID) } } - return + return err } -func programs(dmx *astits.Demuxer) (o []*Program, err error) { +func programs(dmx *astits.Demuxer) (o []*Program, err error) { // nolint:funlen,gocognit // Loop through data var d *astits.DemuxerData - var pgmsToProcess = make(map[uint16]bool) - var pgms = make(map[uint16]*Program) + pgmsToProcess := make(map[uint16]bool) + pgms := make(map[uint16]*Program) log.Println("Fetching data...") for { // Get next data if d, err = dmx.NextData(); err != nil { - if err == astits.ErrNoMorePackets { + if errors.Is(err, io.EOF) { err = nil break } @@ -288,7 +295,7 @@ func programs(dmx *astits.Demuxer) (o []*Program, err error) { } // Check data - if d.PAT != nil { + if d.PAT != nil { //nolint:nestif // Build programs list for _, p := range d.PAT.Programs { // Program number 0 is reserved to NIT @@ -313,7 +320,7 @@ func programs(dmx *astits.Demuxer) (o []*Program, err error) { // Add elementary streams for _, es := range d.PMT.ElementaryStreams { - var s = newStream(es.ElementaryPID, es.StreamType) + s := newStream(es.ElementaryPID, es.StreamType) for _, d := range es.ElementaryStreamDescriptors { s.Descriptors = append(s.Descriptors, descriptorToString(d)) } @@ -334,10 +341,10 @@ func programs(dmx *astits.Demuxer) (o []*Program, err error) { for _, p := range pgms { o = append(o, p) } - return + return o, err } -// Program represents a program +// Program represents a program. type Program struct { Descriptors []string `json:"descriptors,omitempty"` ID uint16 `json:"id,omitempty"` @@ -345,7 +352,7 @@ type Program struct { Streams []*Stream `json:"streams,omitempty"` } -// Stream represents a stream +// Stream represents a stream. type Stream struct { Descriptors []string `json:"descriptors,omitempty"` ID uint16 `json:"id,omitempty"` @@ -366,7 +373,7 @@ func newStream(id uint16, _type astits.StreamType) *Stream { } } -// String implements the Stringer interface +// String implements the Stringer interface. func (p Program) String() (o string) { o = fmt.Sprintf("[%d] - Map ID: %d", p.ID, p.MapID) for _, d := range p.Descriptors { @@ -378,10 +385,10 @@ func (p Program) String() (o string) { return } -// String implements the Stringer interface +// String implements the Stringer interface. 
func (s Stream) String() (o string) { // Get type - var t = fmt.Sprintf("unlisted stream type %d", s.Type) + t := fmt.Sprintf("unlisted stream type %d", s.Type) switch s.Type { case astits.StreamTypeMPEG1Audio: t = "MPEG-1 audio" @@ -414,7 +421,8 @@ func eventsToString(es []*astits.EITDataEvent) string { } func eventToString(idx int, e *astits.EITDataEvent) (s string) { - s += fmt.Sprintf("- #%d | id: %d | start: %s | duration: %s | status: %s\n", idx+1, e.EventID, e.StartTime.Format("15:04:05"), e.Duration, runningStatusToString(e.RunningStatus)) + s += fmt.Sprintf("- #%d | id: %d | start: %s | duration: %s | status: %s\n", + idx+1, e.EventID, e.StartTime.Format("15:04:05"), e.Duration, runningStatusToString(e.RunningStatus)) var os []string for _, d := range e.Descriptors { os = append(os, " - "+descriptorToString(d)) @@ -434,16 +442,23 @@ func runningStatusToString(s uint8) string { return "unknown" } -func descriptorToString(d *astits.Descriptor) string { +func descriptorToString(d *astits.Descriptor) string { // nolint:funlen switch d.Tag { case astits.DescriptorTagAC3: - return fmt.Sprintf("[AC3] ac3 asvc: %d | bsid: %d | component type: %d | mainid: %d | info: %s", d.AC3.ASVC, d.AC3.BSID, d.AC3.ComponentType, d.AC3.MainID, d.AC3.AdditionalInfo) + return fmt.Sprintf("[AC3] ac3 asvc: %d | bsid: %d | component type: %d | mainid: %d | info: %s", + d.AC3.ASVC, d.AC3.BSID, d.AC3.ComponentType, d.AC3.MainID, d.AC3.AdditionalInfo) case astits.DescriptorTagComponent: - return fmt.Sprintf("[Component] language: %s | text: %s | component tag: %d | component type: %d | stream content: %d | stream content ext: %d", d.Component.ISO639LanguageCode, d.Component.Text, d.Component.ComponentTag, d.Component.ComponentType, d.Component.StreamContent, d.Component.StreamContentExt) + return fmt.Sprintf("[Component] language: %s | text: %s |"+ + " component tag: %d | component type: %d"+ + " | stream content: %d | stream content ext: %d", + d.Component.ISO639LanguageCode, d.Component.Text, + d.Component.ComponentTag, d.Component.ComponentType, + d.Component.StreamContent, d.Component.StreamContentExt) case astits.DescriptorTagContent: var os []string for _, i := range d.Content.Items { - os = append(os, fmt.Sprintf("content nibble 1: %d | content nibble 2: %d | user byte: %d", i.ContentNibbleLevel1, i.ContentNibbleLevel2, i.UserByte)) + os = append(os, fmt.Sprintf("content nibble 1: %d | content nibble 2: %d | user byte: %d", + i.ContentNibbleLevel1, i.ContentNibbleLevel2, i.UserByte)) } return "[Content] " + strings.Join(os, " - ") case astits.DescriptorTagExtendedEvent: @@ -453,7 +468,8 @@ func descriptorToString(d *astits.Descriptor) string { } return s case astits.DescriptorTagISO639LanguageAndAudioType: - return fmt.Sprintf("[ISO639 language and audio type] language: %s | audio type: %d", d.ISO639LanguageAndAudioType.Language, d.ISO639LanguageAndAudioType.Type) + return fmt.Sprintf("[ISO639 language and audio type] language: %s | audio type: %d", + d.ISO639LanguageAndAudioType.Language, d.ISO639LanguageAndAudioType.Type) case astits.DescriptorTagMaximumBitrate: return fmt.Sprintf("[Maximum bitrate] maximum bitrate: %d", d.MaximumBitrate.Bitrate) case astits.DescriptorTagNetworkName: @@ -469,13 +485,15 @@ func descriptorToString(d *astits.Descriptor) string { case astits.DescriptorTagService: return fmt.Sprintf("[Service] service %s | provider: %s", d.Service.Name, d.Service.Provider) case astits.DescriptorTagShortEvent: - return fmt.Sprintf("[Short event] language: %s | name: %s | text: %s", 
			d.ShortEvent.Language, d.ShortEvent.EventName, d.ShortEvent.Text)
+		return fmt.Sprintf("[Short event] language: %s | name: %s | text: %s",
+			d.ShortEvent.Language, d.ShortEvent.EventName, d.ShortEvent.Text)
 	case astits.DescriptorTagStreamIdentifier:
 		return fmt.Sprintf("[Stream identifier] stream identifier component tag: %d", d.StreamIdentifier.ComponentTag)
 	case astits.DescriptorTagSubtitling:
 		var os []string
 		for _, i := range d.Subtitling.Items {
-			os = append(os, fmt.Sprintf("subtitling composition page: %d | ancillary page %d: %s", i.CompositionPageID, i.AncillaryPageID, i.Language))
+			os = append(os, fmt.Sprintf("subtitling composition page: %d | ancillary page %d: %s",
+				i.CompositionPageID, i.AncillaryPageID, i.Language))
 		}
 		return "[Subtitling] " + strings.Join(os, " - ")
 	case astits.DescriptorTagTeletext:
diff --git a/crc32.go b/crc32.go
index 5a3f601..fc36e72 100644
--- a/crc32.go
+++ b/crc32.go
@@ -1,25 +1,91 @@
 package astits
 
-const (
-	crc32Polynomial = uint32(0xffffffff)
-)
-
-// computeCRC32 computes a CRC32
-// https://stackoverflow.com/questions/35034042/how-to-calculate-crc32-in-psi-si-packet
-func computeCRC32(bs []byte) uint32 {
-	return updateCRC32(crc32Polynomial, bs)
-}
-
-func updateCRC32(crc32 uint32, bs []byte) uint32 {
-	for _, b := range bs {
-		for i := 0; i < 8; i++ {
-			if (crc32 >= uint32(0x80000000)) != (b >= uint8(0x80)) {
-				crc32 = (crc32 << 1) ^ 0x04C11DB7
-			} else {
-				crc32 = crc32 << 1
-			}
-			b <<= 1
+const crc32Polynomial = 0xffffffff
+
+// CRC32Writer calculates CRC32 for written bytes.
+type CRC32Writer struct {
+	out   WriterAndByteWriter
+	crc32 uint32
+}
+
+// NewCRC32Writer returns a CRC32Writer with the initial polynomial.
+func NewCRC32Writer(w WriterAndByteWriter) *CRC32Writer {
+	return &CRC32Writer{
+		out:   w,
+		crc32: crc32Polynomial,
+	}
+}
+
+// Write implements io.Writer.
+func (w *CRC32Writer) Write(p []byte) (int, error) {
+	n, err := w.out.Write(p)
+	for i := 0; i < n; i++ {
+		w.crc32 = updateCRC32(w.crc32, p[i])
+	}
+	return n, err
+}
+
+// WriteByte implements io.ByteWriter.
+func (w *CRC32Writer) WriteByte(b byte) error {
+	w.crc32 = updateCRC32(w.crc32, b)
+	return w.out.WriteByte(b)
+}
+
+// CRC32 returns the current checksum.
+func (w *CRC32Writer) CRC32() uint32 {
+	return w.crc32
+}
+
+// CRC32Reader calculates a checksum for read bytes.
+type CRC32Reader struct {
+	rd    ReaderAndByteReader
+	crc32 uint32
+}
+
+// NewCRC32Reader returns a CRC32Reader with the initial polynomial.
+func NewCRC32Reader(rd ReaderAndByteReader) *CRC32Reader {
+	return &CRC32Reader{
+		rd:    rd,
+		crc32: crc32Polynomial,
+	}
+}
+
+// Read implements io.Reader.
+func (r *CRC32Reader) Read(p []byte) (int, error) {
+	n, err := r.rd.Read(p)
+	for i := 0; i < n; i++ {
+		r.crc32 = updateCRC32(r.crc32, p[i])
+	}
+	return n, err
+	/*b, err := r.ReadByte()
+	n := copy(p, []byte{b})
+	return n, err*/
+}
+
+// ReadByte implements io.ByteReader.
+func (r *CRC32Reader) ReadByte() (byte, error) {
+	b, err := r.rd.ReadByte()
+	if err != nil {
+		return 0, err
+	}
+
+	r.crc32 = updateCRC32(r.crc32, b)
+	return b, nil
+}
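For reference, a minimal sketch of how the new CRC32 helpers are meant to be used from outside the package. It assumes the module's usual github.com/asticode/go-astits import path; the payload bytes are arbitrary and only for illustration.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/asticode/go-astits"
)

func main() {
	// Wrap any WriterAndByteWriter; the checksum is updated as bytes pass through.
	buf := &bytes.Buffer{}
	w := astits.NewCRC32Writer(buf)
	if _, err := w.Write([]byte{0x00, 0xb0, 0x0d}); err != nil { // arbitrary payload
		panic(err)
	}
	fmt.Printf("written: %x crc32: %#08x\n", buf.Bytes(), w.CRC32())

	// The reader counterpart accumulates the checksum of whatever is read,
	// which lets a parser verify a section's CRC while consuming it.
	r := astits.NewCRC32Reader(bytes.NewReader(buf.Bytes()))
	p := make([]byte, buf.Len())
	if _, err := r.Read(p); err != nil {
		panic(err)
	}
	fmt.Printf("read:    %x crc32: %#08x\n", p, r.CRC32())
}
```

The point of the wrapper types is that PSI serialization and parsing code can compute the checksum while section bytes stream through, instead of buffering everything and calling the standalone computeCRC32 removed above.

+
+// CRC32 returns the current checksum.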
+func (r *CRC32Reader) CRC32() uint32 { + return r.crc32 +} + +func updateCRC32(crc32 uint32, b byte) uint32 { + for i := 0; i < 8; i++ { + if (crc32 >= 0x80000000) != (b >= 0x80) { + crc32 = (crc32 << 1) ^ 0x04C11DB7 + } else { + crc32 = crc32 << 1 //nolint:gocritic } + b <<= 1 } return crc32 } diff --git a/data.go b/data.go index 08dbecd..6d20557 100644 --- a/data.go +++ b/data.go @@ -1,20 +1,33 @@ package astits import ( + "bytes" "fmt" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// PIDs +// PIDs. const ( - PIDPAT uint16 = 0x0 // Program Association Table (PAT) contains a directory listing of all Program Map Tables. - PIDCAT uint16 = 0x1 // Conditional Access Table (CAT) contains a directory listing of all ITU-T Rec. H.222 entitlement management message streams used by Program Map Tables. - PIDTSDT uint16 = 0x2 // Transport Stream Description Table (TSDT) contains descriptors related to the overall transport stream - PIDNull uint16 = 0x1fff // Null Packet (used for fixed bandwidth padding) + // Program Association Table (PAT) contains a + // directory listing of all Program Map Tables. + PIDPAT uint16 = 0x0 + + // Conditional Access Table (CAT) contains + // a directory listing of all ITU-T Rec. + // H.222 entitlement management message + // streams used by Program Map Tables. + PIDCAT uint16 = 0x1 + + // Transport Stream Description Table (TSDT) contains + // descriptors related to the overall transport stream. + PIDTSDT uint16 = 0x2 + + // Null Packet (used for fixed bandwidth padding). + PIDNull uint16 = 0x1fff ) -// DemuxerData represents a data parsed by Demuxer +// DemuxerData represents a data parsed by Demuxer. type DemuxerData struct { EIT *EITData FirstPacket *Packet @@ -27,87 +40,88 @@ type DemuxerData struct { TOT *TOTData } -// MuxerData represents a data to be written by Muxer +// MuxerData represents a data to be written by Muxer. type MuxerData struct { PID uint16 AdaptationField *PacketAdaptationField PES *PESData } -// parseData parses a payload spanning over multiple packets and returns a set of data -func parseData(ps []*Packet, prs PacketsParser, pm *programMap) (ds []*DemuxerData, err error) { +// parseData parses a payload spanning over +// multiple packets and returns a set of data. +func parseData( + pkts []*Packet, + prs PacketsParser, + pm *programMap, +) ([]*DemuxerData, error) { // Use custom parser first + var ds []*DemuxerData if prs != nil { - var skip bool - if ds, skip, err = prs(ps); err != nil { - err = fmt.Errorf("astits: custom packets parsing failed: %w", err) - return - } else if skip { - return + data, skip, err := prs(pkts) + if err != nil { + return nil, fmt.Errorf("custom packets parsing failed: %w", err) } + if skip { + return data, nil + } + ds = data } - // Get payload length - var l int - for _, p := range ps { - l += len(p.Payload) + var payloadLength int64 + for _, p := range pkts { + payloadLength += int64(len(p.Payload) * 8) } - - // Append payload - var payload = make([]byte, l) - var c int - for _, p := range ps { - c += copy(payload[c:], p.Payload) + payload := make([]byte, payloadLength/8) + var n int + for _, pkt := range pkts { + n += copy(payload[n:], pkt.Payload) } - // Create reader - i := astikit.NewBytesIterator(payload) - - // Parse PID - pid := ps[0].Header.PID + pid := pkts[0].Header.PID // Parse payload if pid == PIDCAT { - // Information in a CAT payload is private and dependent on the CA system. 
Use the PacketsParser - // to parse this type of payload - } else if isPSIPayload(pid, pm) { - // Parse PSI data - var psiData *PSIData - if psiData, err = parsePSIData(i); err != nil { - err = fmt.Errorf("astits: parsing PSI data failed: %w", err) - return - } + // Information in a CAT payload is private and dependent on the CA system. + // Use the PacketsParser to parse this type of payload. + return ds, nil + } - // Append data - ds = psiData.toData(ps[0], pid) - } else if isPESPayload(payload) { - // Parse PES data - var pesData *PESData - if pesData, err = parsePESData(i); err != nil { - err = fmt.Errorf("astits: parsing PES data failed: %w", err) - return + r := bitio.NewCountReader(bytes.NewReader(payload)) + + if isPSIPayload(pid, pm) { + psiData, err := parsePSIData(r) + if err != nil { + return nil, fmt.Errorf("parsing PSI data failed: %w", err) } + ds = psiData.toData(pkts[0], pid) + return ds, nil + } - // Append data + if isPESPayload(payload) { + pesData, err := parsePESData(r, payloadLength) + if err != nil { + return nil, fmt.Errorf("parsing PES data failed: %w", err) + } ds = append(ds, &DemuxerData{ - FirstPacket: ps[0], + FirstPacket: pkts[0], PES: pesData, PID: pid, }) } - return + + return ds, nil } -// isPSIPayload checks whether the payload is a PSI one +// isPSIPayload checks whether the payload is a PSI one. func isPSIPayload(pid uint16, pm *programMap) bool { return pid == PIDPAT || // PAT pm.exists(pid) || // PMT - ((pid >= 0x10 && pid <= 0x14) || (pid >= 0x1e && pid <= 0x1f)) //DVB + ((pid >= 0x10 && pid <= 0x14) || (pid >= 0x1e && pid <= 0x1f)) // DVB } -// isPESPayload checks whether the payload is a PES one +// isPESPayload checks whether the payload is a PES one. func isPESPayload(i []byte) bool { - // Packet is not big enough + // Packet is not big enough. if len(i) < 3 { return false } diff --git a/data_eit.go b/data_eit.go index 4dce53a..b063e48 100644 --- a/data_eit.go +++ b/data_eit.go @@ -4,12 +4,12 @@ import ( "fmt" "time" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) // EITData represents an EIT data -// Page: 36 | Chapter: 5.2.4 | Link: https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf -// (barbashov) the link above can be broken, alternative: https://dvb.org/wp-content/uploads/2019/12/a038_tm1217r37_en300468v1_17_1_-_rev-134_-_si_specification.pdf +// Page: 36 | Chapter: 5.2.4 | Link: +// https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf type EITData struct { Events []*EITDataEvent LastTableID uint8 @@ -19,106 +19,60 @@ type EITData struct { TransportStreamID uint16 } -// EITDataEvent represents an EIT data event +// EITDataEvent represents an EIT data event. type EITDataEvent struct { + Duration time.Duration + EventID uint16 + StartTime time.Time + RunningStatus uint8 + + // When true indicates that access to one or + // more streams may be controlled by a CA system. + HasFreeCSAMode bool Descriptors []*Descriptor - Duration time.Duration - EventID uint16 - HasFreeCSAMode bool // When true indicates that access to one or more streams may be controlled by a CA system. - RunningStatus uint8 - StartTime time.Time } -// parseEITSection parses an EIT section -func parseEITSection(i *astikit.BytesIterator, offsetSectionsEnd int, tableIDExtension uint16) (d *EITData, err error) { - // Create data - d = &EITData{ServiceID: tableIDExtension} +// parseEITSection parses an EIT section. 
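Before the individual section parsers, one note on the error-handling pattern they all share: bitio's Try* methods become no-ops once the reader's TryError is set, so a parser can issue all of its reads unconditionally and report a single error at the end. A minimal sketch of that pattern follows; the function and its fields are hypothetical (not part of this change) and are assumed to sit in package astits next to the TryReadFull helper added in binary.go.

```go
package astits

import "github.com/icza/bitio"

// readExampleHeader is a hypothetical parser illustrating the Try* pattern:
// every read is attempted, and r.TryError reports the first failure once.
func readExampleHeader(r *bitio.CountReader) (id uint16, flag bool, payload []byte, err error) {
	id = uint16(r.TryReadBits(16)) // a 16-bit identifier
	flag = r.TryReadBool()         // a single-bit flag
	_ = r.TryReadBits(7)           // reserved bits, discarded

	payload = make([]byte, 4)
	TryReadFull(r, payload) // helper from binary.go; respects r.TryError

	return id, flag, payload, r.TryError
}
```

parseEITSection below, and the NIT, PAT and PES parsers after it, are all shaped this way, with r.BitsCount used in place of the old byte-iterator offsets.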
+func parseEITSection( + r *bitio.CountReader, + offsetSectionsEnd int64, + tableIDExtension uint16, +) (*EITData, error) { + d := &EITData{ServiceID: tableIDExtension} - // Get next 2 bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + d.TransportStreamID = uint16(r.TryReadBits(16)) - // Transport stream ID - d.TransportStreamID = uint16(bs[0])<<8 | uint16(bs[1]) + d.OriginalNetworkID = uint16(r.TryReadBits(16)) - // Get next 2 bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + d.SegmentLastSectionNumber = r.TryReadByte() - // Original network ID - d.OriginalNetworkID = uint16(bs[0])<<8 | uint16(bs[1]) + d.LastTableID = r.TryReadByte() - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + // Loop until end of section data is reached. + for r.BitsCount < offsetSectionsEnd { + e := &EITDataEvent{} - // Segment last section number - d.SegmentLastSectionNumber = uint8(b) + e.EventID = uint16(r.TryReadBits(16)) - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Last table ID - d.LastTableID = uint8(b) - - // Loop until end of section data is reached - for i.Offset() < offsetSectionsEnd { - // Get next 2 bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return + var err error + if e.StartTime, err = parseDVBTime(r); err != nil { + return nil, fmt.Errorf("parsing DVB time: %w", err) } - // Event ID - var e = &EITDataEvent{} - e.EventID = uint16(bs[0])<<8 | uint16(bs[1]) - - // Start time - if e.StartTime, err = parseDVBTime(i); err != nil { - err = fmt.Errorf("astits: parsing DVB time") - return + if e.Duration, err = parseDVBDurationSeconds(r); err != nil { + return nil, fmt.Errorf("parsing DVB duration seconds failed: %w", err) } - // Duration - if e.Duration, err = parseDVBDurationSeconds(i); err != nil { - err = fmt.Errorf("astits: parsing DVB duration seconds failed: %w", err) - return - } - - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + e.RunningStatus = uint8(r.TryReadBits(3)) - // Running status - e.RunningStatus = uint8(b) >> 5 + e.HasFreeCSAMode = r.TryReadBool() - // Free CA mode - e.HasFreeCSAMode = uint8(b&0x10) > 0 - - // We need to rewind since the current byte is used by the descriptor as well - i.Skip(-1) - - // Descriptors - if e.Descriptors, err = parseDescriptors(i); err != nil { - err = fmt.Errorf("astits: parsing descriptors failed: %w", err) - return + if e.Descriptors, err = parseDescriptors(r); err != nil { + return nil, fmt.Errorf("parsing descriptors failed: %w", err) } - // Add event d.Events = append(d.Events, e) } - return + + return d, r.TryError } diff --git a/data_eit_test.go b/data_eit_test.go index 2b3f494..dea41c0 100644 --- a/data_eit_test.go +++ b/data_eit_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -26,23 +26,24 @@ var eit = &EITData{ func eitBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint16(2)) // Transport stream ID - w.Write(uint16(3)) 
// Original network ID - w.Write(uint8(4)) // Segment last section number - w.Write(uint8(5)) // Last table id - w.Write(uint16(6)) // Event #1 id + w := bitio.NewWriter(buf) + w.WriteBits(uint64(2), 16) // Transport stream ID + w.WriteBits(uint64(3), 16) // Original network ID + w.WriteByte(uint8(4)) // Segment last section number + w.WriteByte(uint8(5)) // Last table id + w.WriteBits(uint64(6), 16) // Event #1 id w.Write(dvbTimeBytes) // Event #1 start time w.Write(dvbDurationSecondsBytes) // Event #1 duration - w.Write("111") // Event #1 running status - w.Write("1") // Event #1 free CA mode + WriteBinary(w, "111") // Event #1 running status + w.WriteBool(true) // Event #1 free CA mode descriptorsBytes(w) // Event #1 descriptors return buf.Bytes() } func TestParseEITSection(t *testing.T) { - var b = eitBytes() - d, err := parseEITSection(astikit.NewBytesIterator(b), len(b), uint16(1)) + b := eitBytes() + r := bitio.NewCountReader(bytes.NewReader(b)) + d, err := parseEITSection(r, int64(len(b)*8), uint16(1)) assert.Equal(t, d, eit) assert.NoError(t, err) } diff --git a/data_nit.go b/data_nit.go index 5191b5d..233a0b9 100644 --- a/data_nit.go +++ b/data_nit.go @@ -3,78 +3,52 @@ package astits import ( "fmt" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// NITData represents a NIT data -// Page: 29 | Chapter: 5.2.1 | Link: https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf -// (barbashov) the link above can be broken, alternative: https://dvb.org/wp-content/uploads/2019/12/a038_tm1217r37_en300468v1_17_1_-_rev-134_-_si_specification.pdf +// NITData represents a NIT data. +// Page: 29 | Chapter: 5.2.1 | Link: +// https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf type NITData struct { NetworkDescriptors []*Descriptor NetworkID uint16 TransportStreams []*NITDataTransportStream } -// NITDataTransportStream represents a NIT data transport stream +// NITDataTransportStream represents a NIT data transport stream. type NITDataTransportStream struct { OriginalNetworkID uint16 TransportDescriptors []*Descriptor TransportStreamID uint16 } -// parseNITSection parses a NIT section -func parseNITSection(i *astikit.BytesIterator, tableIDExtension uint16) (d *NITData, err error) { - // Create data - d = &NITData{NetworkID: tableIDExtension} +// parseNITSection parses a NIT section. 
+func parseNITSection(r *bitio.CountReader, tableIDExtension uint16) (*NITData, error) { + d := &NITData{NetworkID: tableIDExtension} - // Network descriptors - if d.NetworkDescriptors, err = parseDescriptors(i); err != nil { - err = fmt.Errorf("astits: parsing descriptors failed: %w", err) - return - } + _ = r.TryReadBits(4) - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return + var err error + if d.NetworkDescriptors, err = parseDescriptors(r); err != nil { + return nil, fmt.Errorf("parsing descriptors failed: %w", err) } - // Transport stream loop length - transportStreamLoopLength := int(uint16(bs[0]&0xf)<<8 | uint16(bs[1])) + transportStreamLoopLength := int64(r.TryReadBits(16)) - // Transport stream loop - offsetEnd := i.Offset() + transportStreamLoopLength - for i.Offset() < offsetEnd { - // Create transport stream + offsetEnd := r.BitsCount/8 + transportStreamLoopLength + for r.BitsCount/8 < offsetEnd { ts := &NITDataTransportStream{} - // Get next bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Transport stream ID - ts.TransportStreamID = uint16(bs[0])<<8 | uint16(bs[1]) - - // Get next bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + ts.TransportStreamID = uint16(r.TryReadBits(16)) - // Original network ID - ts.OriginalNetworkID = uint16(bs[0])<<8 | uint16(bs[1]) + ts.OriginalNetworkID = uint16(r.TryReadBits(16)) - // Transport descriptors - if ts.TransportDescriptors, err = parseDescriptors(i); err != nil { - err = fmt.Errorf("astits: parsing descriptors failed: %w", err) - return + _ = r.TryReadBits(4) + if ts.TransportDescriptors, err = parseDescriptors(r); err != nil { + return nil, fmt.Errorf("parsing descriptors failed: %w", err) } - // Append transport stream d.TransportStreams = append(d.TransportStreams, ts) } - return + return d, r.TryError } diff --git a/data_nit_test.go b/data_nit_test.go index 902f20d..d0c9002 100644 --- a/data_nit_test.go +++ b/data_nit_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -20,21 +20,22 @@ var nit = &NITData{ func nitBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write("0000") // Reserved for future use - descriptorsBytes(w) // Network descriptors - w.Write("0000") // Reserved for future use - w.Write("000000001001") // Transport stream loop length - w.Write(uint16(2)) // Transport stream #1 id - w.Write(uint16(3)) // Transport stream #1 original network id - w.Write("0000") // Transport stream #1 reserved for future use - descriptorsBytes(w) // Transport stream #1 descriptors + w := bitio.NewWriter(buf) + WriteBinary(w, "0000") // Reserved for future use + descriptorsBytes(w) // Network descriptors + WriteBinary(w, "0000") // Reserved for future use + WriteBinary(w, "000000001001") // Transport stream loop length + w.WriteBits(uint64(2), 16) // Transport stream #1 id + w.WriteBits(uint64(3), 16) // Transport stream #1 original network id + WriteBinary(w, "0000") // Transport stream #1 reserved for future use + descriptorsBytes(w) // Transport stream #1 descriptors return buf.Bytes() } func TestParseNITSection(t *testing.T) { - var b = nitBytes() - d, err := 
parseNITSection(astikit.NewBytesIterator(b), uint16(1)) + b := nitBytes() + r := bitio.NewCountReader(bytes.NewReader(b)) + d, err := parseNITSection(r, uint16(1)) assert.Equal(t, d, nit) assert.NoError(t, err) } diff --git a/data_pat.go b/data_pat.go index 3b5b555..05ad5d8 100644 --- a/data_pat.go +++ b/data_pat.go @@ -1,10 +1,6 @@ package astits -import ( - "fmt" - - "github.com/asticode/go-astikit" -) +import "github.com/icza/bitio" const ( patSectionEntryBytesSize = 4 // 16 bits + 3 reserved + 13 bits = 32 bits @@ -17,47 +13,46 @@ type PATData struct { TransportStreamID uint16 } -// PATProgram represents a PAT program +// PATProgram represents a PAT program. type PATProgram struct { - ProgramMapID uint16 // The packet identifier that contains the associated PMT - ProgramNumber uint16 // Relates to the Table ID extension in the associated PMT. A value of 0 is reserved for a NIT packet identifier. + // ProgramNumber Relates to the Table ID extension in the associated PMT. + // A value of 0 is reserved for a NIT packet identifier. + ProgramNumber uint16 + + // ProgramMapID 13 bits. The packet identifier that contains the associated PMT + ProgramMapID uint16 } -// parsePATSection parses a PAT section -func parsePATSection(i *astikit.BytesIterator, offsetSectionsEnd int, tableIDExtension uint16) (d *PATData, err error) { - // Create data - d = &PATData{TransportStreamID: tableIDExtension} - - // Loop until end of section data is reached - for i.Offset() < offsetSectionsEnd { - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(4); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Append program - d.Programs = append(d.Programs, &PATProgram{ - ProgramMapID: uint16(bs[2]&0x1f)<<8 | uint16(bs[3]), - ProgramNumber: uint16(bs[0])<<8 | uint16(bs[1]), - }) +// parsePATSection parses a PAT section. 
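A consequence of switching to bitio.CountReader is that section offsets are now expressed in bits: callers pass len(b)*8 (or compare against r.BitsCount) as offsetSectionsEnd, exactly as the updated tests do. A small in-package round-trip sketch for the PAT helpers below; the values are arbitrary and this mirrors data_pat_test.go rather than adding anything new.

```go
package astits

import (
	"bytes"
	"fmt"

	"github.com/icza/bitio"
)

// patRoundTrip writes a PATData with writePATSection and parses it back,
// passing the section end as a bit offset.
func patRoundTrip() error {
	in := &PATData{
		TransportStreamID: 1,
		Programs: []*PATProgram{
			{ProgramNumber: 2, ProgramMapID: 3},
		},
	}

	buf := &bytes.Buffer{}
	w := bitio.NewWriter(buf)
	if _, err := writePATSection(w, in); err != nil {
		return err
	}
	if _, err := w.Align(); err != nil { // flush any partial byte
		return err
	}

	r := bitio.NewCountReader(bytes.NewReader(buf.Bytes()))
	out, err := parsePATSection(r, int64(buf.Len()*8), in.TransportStreamID)
	if err != nil {
		return err
	}

	fmt.Printf("parsed %d program(s)\n", len(out.Programs))
	return nil
}
```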
+func parsePATSection( + r *bitio.CountReader, + offsetSectionsEnd int64, + tableIDExtension uint16, +) (*PATData, error) { + d := &PATData{TransportStreamID: tableIDExtension} + + for r.BitsCount < offsetSectionsEnd { + p := &PATProgram{} + + p.ProgramNumber = uint16(r.TryReadBits(16)) + r.TryReadBits(3) + p.ProgramMapID = uint16(r.TryReadBits(13)) + + d.Programs = append(d.Programs, p) } - return + return d, r.TryError } func calcPATSectionLength(d *PATData) uint16 { return uint16(4 * len(d.Programs)) } -func writePATSection(w *astikit.BitsWriter, d *PATData) (int, error) { - b := astikit.NewBitsWriterBatch(w) - +func writePATSection(w *bitio.Writer, d *PATData) (int, error) { for _, p := range d.Programs { - b.Write(p.ProgramNumber) - b.WriteN(uint8(0xff), 3) - b.WriteN(p.ProgramMapID, 13) + w.TryWriteBits(uint64(p.ProgramNumber), 16) + w.TryWriteBits(0xff, 3) + w.TryWriteBits(uint64(p.ProgramMapID), 13) } - return len(d.Programs) * patSectionEntryBytesSize, b.Err() + return len(d.Programs) * patSectionEntryBytesSize, w.TryError } diff --git a/data_pat_test.go b/data_pat_test.go index 5f4fd91..80f66fb 100644 --- a/data_pat_test.go +++ b/data_pat_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -18,26 +18,27 @@ var pat = &PATData{ func patBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint16(2)) // Program #1 number - w.Write("111") // Program #1 reserved bits - w.Write("0000000000011") // Program #1 map ID - w.Write(uint16(4)) // Program #2 number - w.Write("111") // Program #2 reserved bits - w.Write("0000000000101") // Program #3 map ID + w := bitio.NewWriter(buf) + w.WriteBits(uint64(2), 16) // Program #1 number + WriteBinary(w, "111") // Program #1 reserved bits + WriteBinary(w, "0000000000011") // Program #1 map ID + w.WriteBits(uint64(4), 16) // Program #2 number + WriteBinary(w, "111") // Program #2 reserved bits + WriteBinary(w, "0000000000101") // Program #3 map ID return buf.Bytes() } func TestParsePATSection(t *testing.T) { - var b = patBytes() - d, err := parsePATSection(astikit.NewBytesIterator(b), len(b), uint16(1)) + b := patBytes() + r := bitio.NewCountReader(bytes.NewReader(b)) + d, err := parsePATSection(r, int64(len(b)*8), uint16(1)) assert.Equal(t, d, pat) assert.NoError(t, err) } func TestWritePATSection(t *testing.T) { bw := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: bw}) + w := bitio.NewWriter(bw) n, err := writePATSection(w, pat) assert.NoError(t, err) assert.Equal(t, n, 8) @@ -50,7 +51,8 @@ func BenchmarkParsePATSection(b *testing.B) { bs := patBytes() for i := 0; i < b.N; i++ { - parsePATSection(astikit.NewBytesIterator(bs), len(bs), uint16(1)) + r := bitio.NewCountReader(bytes.NewReader(bs)) + parsePATSection(r, int64(len(bs)), uint16(1)) } } @@ -59,7 +61,7 @@ func BenchmarkWritePATSection(b *testing.B) { bw := &bytes.Buffer{} bw.Grow(1024) - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: bw}) + w := bitio.NewWriter(bw) for i := 0; i < b.N; i++ { bw.Reset() diff --git a/data_pes.go b/data_pes.go index 61b19b1..de05a41 100644 --- a/data_pes.go +++ b/data_pes.go @@ -1,18 +1,19 @@ package astits import ( + "bytes" "fmt" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// P-STD buffer scales +// P-STD buffer scales. const ( PSTDBufferScale128Bytes = 0 PSTDBufferScale1024Bytes = 1 ) -// PTS DTS indicator +// PTS DTS indicator. 
const ( PTSDTSIndicatorBothPresent = 3 PTSDTSIndicatorIsForbidden = 1 @@ -20,14 +21,14 @@ const ( PTSDTSIndicatorOnlyPTS = 2 ) -// Stream IDs +// Stream IDs. const ( StreamIDPrivateStream1 = 189 StreamIDPaddingStream = 190 StreamIDPrivateStream2 = 191 ) -// Trick mode controls +// Trick mode controls. const ( TrickModeControlFastForward = 0 TrickModeControlFastReverse = 3 @@ -43,7 +44,7 @@ const ( dsmTrickModeLength = 1 ) -// PESData represents a PES data +// PESData represents a PES data. // https://en.wikipedia.org/wiki/Packetized_elementary_stream // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html // http://happy.emu.id.au/lab/tut/dttb/dtbtut4b.htm @@ -52,406 +53,351 @@ type PESData struct { Header *PESHeader } -// PESHeader represents a packet PES header +// PESHeader represents a packet PES header. type PESHeader struct { OptionalHeader *PESOptionalHeader - PacketLength uint16 // Specifies the number of bytes remaining in the packet after this field. Can be zero. If the PES packet length is set to zero, the PES packet can be of any length. A value of zero for the PES packet length can be used only when the PES packet payload is a video elementary stream. - StreamID uint8 // Examples: Audio streams (0xC0-0xDF), Video streams (0xE0-0xEF) + + // Specifies the number of bytes remaining in the packet + // after this field. Can be zero. If the PES packet length + // is set to zero, the PES packet can be of any length. + // A value of zero for the PES packet length can be used + // only when the PES packet payload is a video elementary stream. + PacketLength uint16 + + // Examples: Audio streams (0xC0-0xDF), Video streams (0xE0-0xEF) + StreamID uint8 } -// PESOptionalHeader represents a PES optional header +// PESOptionalHeader represents a PES optional header. type PESOptionalHeader struct { - AdditionalCopyInfo uint8 - CRC uint16 - DataAlignmentIndicator bool // True indicates that the PES packet header is immediately followed by the video start code or audio syncword - DSMTrickMode *DSMTrickMode - DTS *ClockReference - ESCR *ClockReference - ESRate uint32 - Extension2Data []byte - Extension2Length uint8 - HasAdditionalCopyInfo bool - HasCRC bool - HasDSMTrickMode bool - HasESCR bool - HasESRate bool - HasExtension bool - HasExtension2 bool - HasOptionalFields bool - HasPackHeaderField bool + MarkerBits uint8 // 2 bits. + ScramblingControl uint8 // 2 bits. + Priority bool + DataAlignmentIndicator bool + IsCopyrighted bool + IsOriginal bool + + PTSDTSIndicator uint8 // 2 bits. + HasESCR bool + HasESRate bool + HasDSMTrickMode bool + HasAdditionalCopyInfo bool + HasCRC bool + HasExtension bool + + HeaderLength uint8 + + PTS *ClockReference + DTS *ClockReference + ESCR *ClockReference + ESRate uint32 // 22 bits. + DSMTrickMode *DSMTrickMode + AdditionalCopyInfo uint8 // 7 bits. + CRC uint16 + HasPrivateData bool + HasPackHeaderField bool HasProgramPacketSequenceCounter bool HasPSTDBuffer bool - HeaderLength uint8 - IsCopyrighted bool - IsOriginal bool - MarkerBits uint8 - MPEG1OrMPEG2ID uint8 - OriginalStuffingLength uint8 - PacketSequenceCounter uint8 - PackField uint8 - Priority bool - PrivateData []byte - PSTDBufferScale uint8 - PSTDBufferSize uint16 - PTS *ClockReference - PTSDTSIndicator uint8 - ScramblingControl uint8 + HasExtension2 bool + + PrivateData []byte // 16 bytes. + PackField uint8 + + PacketSequenceCounter uint8 // 7 bits. + MPEG1OrMPEG2ID bool + OriginalStuffingLength uint8 // 5 bits? + + PSTDBufferScale bool + PSTDBufferSize uint16 // 13 bits. 
+ + Extension2Length uint8 // 7 bits. + Extension2Data []byte } -// DSMTrickMode represents a DSM trick mode -// https://books.google.fr/books?id=vwUrAwAAQBAJ&pg=PT501&lpg=PT501&dq=dsm+trick+mode+control&source=bl&ots=fI-9IHXMRL&sig=PWnhxrsoMWNQcl1rMCPmJGNO9Ds&hl=fr&sa=X&ved=0ahUKEwjogafD8bjXAhVQ3KQKHeHKD5oQ6AEINDAB#v=onepage&q=dsm%20trick%20mode%20control&f=false +// DSMTrickMode represents a DSM trick mode. +// https://patents.google.com/patent/US8213779B2/en type DSMTrickMode struct { - FieldID uint8 - FrequencyTruncation uint8 - IntraSliceRefresh uint8 - RepeatControl uint8 - TrickModeControl uint8 + TrickModeControl uint8 // 3 Bits. + FieldID uint8 // 2 Bits. + IntraSliceRefresh bool + FrequencyTruncation uint8 // 2 Bits. + RepeatControl uint8 // 5 Bits. } +// IsVideoStream . func (h *PESHeader) IsVideoStream() bool { return h.StreamID == 0xe0 || h.StreamID == 0xfd } -// parsePESData parses a PES data -func parsePESData(i *astikit.BytesIterator) (d *PESData, err error) { - // Create data - d = &PESData{} +// parsePESData parses a PES data. +func parsePESData(r *bitio.CountReader, payloadLength int64) (*PESData, error) { + d := &PESData{} // Skip first 3 bytes that are there to identify the PES payload - i.Seek(3) + skip := make([]byte, 3) + TryReadFull(r, skip) - // Parse header - var dataStart, dataEnd int - if d.Header, dataStart, dataEnd, err = parsePESHeader(i); err != nil { - err = fmt.Errorf("astits: parsing PES header failed: %w", err) - return + header, dataStart, dataEnd, err := parsePESHeader(r, payloadLength) + if err != nil { + return nil, fmt.Errorf("parsing PES header failed: %w", err) } + d.Header = header - // Seek to data - i.Seek(dataStart) - - // Extract data - if d.Data, err = i.NextBytes(dataEnd - dataStart); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return + if dataStart > r.BitsCount { + skip := make([]byte, (dataStart-r.BitsCount)/8) + TryReadFull(r, skip) } - return + + d.Data = make([]byte, (dataEnd-dataStart)/8) + TryReadFull(r, d.Data) + + return d, r.TryError } -// hasPESOptionalHeader checks whether the data has a PES optional header +// hasPESOptionalHeader checks whether the data has a PES optional header. func hasPESOptionalHeader(streamID uint8) bool { return streamID != StreamIDPaddingStream && streamID != StreamIDPrivateStream2 } -// parsePESHeader parses a PES header -func parsePESHeader(i *astikit.BytesIterator) (h *PESHeader, dataStart, dataEnd int, err error) { - // Create header +// parsePESHeader parses a PES header. 
+func parsePESHeader(r *bitio.CountReader, payloadLength int64) (h *PESHeader, dataStart, dataEnd int64, err error) {
 	h = &PESHeader{}
 
-	// Get next byte
-	var b byte
-	if b, err = i.NextByte(); err != nil {
-		err = fmt.Errorf("astits: fetching next byte failed: %w", err)
-		return
-	}
-
-	// Stream ID
-	h.StreamID = uint8(b)
-
-	// Get next bytes
-	var bs []byte
-	if bs, err = i.NextBytesNoCopy(2); err != nil {
-		err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
-		return
-	}
+	h.StreamID = r.TryReadByte()
 
-	// Length
-	h.PacketLength = uint16(bs[0])<<8 | uint16(bs[1])
+	h.PacketLength = uint16(r.TryReadBits(16))
 
 	// Update data end
 	if h.PacketLength > 0 {
-		dataEnd = i.Offset() + int(h.PacketLength)
+		dataEnd = r.BitsCount + int64(h.PacketLength)*8
 	} else {
-		dataEnd = i.Len()
+		dataEnd = payloadLength
 	}
 
-	// Optional header
 	if hasPESOptionalHeader(h.StreamID) {
-		if h.OptionalHeader, dataStart, err = parsePESOptionalHeader(i); err != nil {
-			err = fmt.Errorf("astits: parsing PES optional header failed: %w", err)
+		h.OptionalHeader, dataStart, err = parsePESOptionalHeader(r)
+		if err != nil {
+			err = fmt.Errorf("parsing PES optional header failed: %w", err)
 			return
 		}
 	} else {
-		dataStart = i.Offset()
+		dataStart = r.BitsCount
 	}
-	return
+
+	return h, dataStart, dataEnd, r.TryError
 }
 
-// parsePESOptionalHeader parses a PES optional header
-func parsePESOptionalHeader(i *astikit.BytesIterator) (h *PESOptionalHeader, dataStart int, err error) {
+// parsePESOptionalHeader parses a PES optional header.
+func parsePESOptionalHeader(r *bitio.CountReader) (*PESOptionalHeader, int64, error) { //nolint:funlen
 	// Create header
-	h = &PESOptionalHeader{}
-
-	// Get next byte
-	var b byte
-	if b, err = i.NextByte(); err != nil {
-		err = fmt.Errorf("astits: fetching next byte failed: %w", err)
-		return
-	}
-
-	// Marker bits
-	h.MarkerBits = uint8(b) >> 6
-
-	// Scrambling control
-	h.ScramblingControl = uint8(b) >> 4 & 0x3
+	h := &PESOptionalHeader{}
 
-	// Priority
-	h.Priority = uint8(b)&0x8 > 0
+	h.MarkerBits = uint8(r.TryReadBits(2))
+	h.ScramblingControl = uint8(r.TryReadBits(2))
+	h.Priority = r.TryReadBool()
+	h.DataAlignmentIndicator = r.TryReadBool()
+	h.IsCopyrighted = r.TryReadBool()
+	h.IsOriginal = r.TryReadBool()
 
-	// Data alignment indicator
-	h.DataAlignmentIndicator = uint8(b)&0x4 > 0
+	h.PTSDTSIndicator = uint8(r.TryReadBits(2))
+	h.HasESCR = r.TryReadBool()
+	h.HasESRate = r.TryReadBool()
+	h.HasDSMTrickMode = r.TryReadBool()
+	h.HasAdditionalCopyInfo = r.TryReadBool()
+	h.HasCRC = r.TryReadBool()
+	h.HasExtension = r.TryReadBool()
 
-	// Copyrighted
-	h.IsCopyrighted = uint(b)&0x2 > 0
-
-	// Original or copy
-	h.IsOriginal = uint8(b)&0x1 > 0
-
-	// Get next byte
-	if b, err = i.NextByte(); err != nil {
-		err = fmt.Errorf("astits: fetching next byte failed: %w", err)
-		return
-	}
-
-	// PTS DST indicator
-	h.PTSDTSIndicator = uint8(b) >> 6 & 0x3
-
-	// Flags
-	h.HasESCR = uint8(b)&0x20 > 0
-	h.HasESRate = uint8(b)&0x10 > 0
-	h.HasDSMTrickMode = uint8(b)&0x8 > 0
-	h.HasAdditionalCopyInfo = uint8(b)&0x4 > 0
-	h.HasCRC = uint8(b)&0x2 > 0
-	h.HasExtension = uint8(b)&0x1 > 0
-
-	// Get next byte
-	if b, err = i.NextByte(); err != nil {
-		err = fmt.Errorf("astits: fetching next byte failed: %w", err)
-		return
-	}
-
-	// Header length
-	h.HeaderLength = uint8(b)
+	h.HeaderLength = r.TryReadByte()
 
 	// Update data start
-	dataStart = i.Offset() + int(h.HeaderLength)
+	dataStart := r.BitsCount + int64(h.HeaderLength)*8
+	var err error
 
 	// PTS/DTS
 	if h.PTSDTSIndicator ==
PTSDTSIndicatorOnlyPTS { - if h.PTS, err = parsePTSOrDTS(i); err != nil { - err = fmt.Errorf("astits: parsing PTS failed: %w", err) - return + _ = r.TryReadBits(4) // Reserved. + if h.PTS, err = parsePTSOrDTS(r); err != nil { + return nil, 0, fmt.Errorf("parsing PTS failed: %w", err) } } else if h.PTSDTSIndicator == PTSDTSIndicatorBothPresent { - if h.PTS, err = parsePTSOrDTS(i); err != nil { - err = fmt.Errorf("astits: parsing PTS failed: %w", err) - return + _ = r.TryReadBits(4) // Reserved. + if h.PTS, err = parsePTSOrDTS(r); err != nil { + return nil, 0, fmt.Errorf("parsing PTS failed: %w", err) } - if h.DTS, err = parsePTSOrDTS(i); err != nil { - err = fmt.Errorf("astits: parsing PTS failed: %w", err) - return + _ = r.TryReadBits(4) // Reserved. + if h.DTS, err = parsePTSOrDTS(r); err != nil { + return nil, 0, fmt.Errorf("parsing PTS failed: %w", err) } } - // ESCR if h.HasESCR { - if h.ESCR, err = parseESCR(i); err != nil { - err = fmt.Errorf("astits: parsing ESCR failed: %w", err) - return + if h.ESCR, err = parseESCR(r); err != nil { + return nil, 0, fmt.Errorf("parsing ESCR failed: %w", err) } } - // ES rate if h.HasESRate { - var bs []byte - if bs, err = i.NextBytesNoCopy(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - h.ESRate = uint32(bs[0])&0x7f<<15 | uint32(bs[1])<<7 | uint32(bs[2])>>1 + _ = r.TryReadBool() // Reserved. + h.ESRate = uint32(r.TryReadBits(22)) + _ = r.TryReadBool() // Reserved. } - // Trick mode if h.HasDSMTrickMode { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return + h.DSMTrickMode, err = parseDSMTrickMode(r) + if err != nil { + return nil, 0, fmt.Errorf("parsing DSM trick mode failed: %w", err) } - h.DSMTrickMode = parseDSMTrickMode(b) } - // Additional copy info if h.HasAdditionalCopyInfo { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - h.AdditionalCopyInfo = b & 0x7f + _ = r.TryReadBool() // Reserved. + h.AdditionalCopyInfo = uint8(r.TryReadBits(7)) } - // CRC if h.HasCRC { - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - h.CRC = uint16(bs[0])>>8 | uint16(bs[1]) + h.CRC = uint16(r.TryReadBits(16)) } - // Extension - if h.HasExtension { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + if !h.HasExtension { + return h, dataStart, nil + } - // Flags - h.HasPrivateData = b&0x80 > 0 - h.HasPackHeaderField = b&0x40 > 0 - h.HasProgramPacketSequenceCounter = b&0x20 > 0 - h.HasPSTDBuffer = b&0x10 > 0 - h.HasExtension2 = b&0x1 > 0 + h.HasPrivateData = r.TryReadBool() + h.HasPackHeaderField = r.TryReadBool() + h.HasProgramPacketSequenceCounter = r.TryReadBool() + h.HasPSTDBuffer = r.TryReadBool() + _ = r.TryReadBits(3) // Reserved. + h.HasExtension2 = r.TryReadBool() - // Private data - if h.HasPrivateData { - if h.PrivateData, err = i.NextBytes(16); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - } + if h.HasPrivateData { + h.PrivateData = make([]byte, 16) + TryReadFull(r, h.PrivateData) + } - // Pack field length - if h.HasPackHeaderField { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - // TODO it's only a length of pack_header, should read it all. 
now it's wrong
-		h.PackField = uint8(b)
-	}
+	if h.HasPackHeaderField {
+		// TODO it's only a length of pack_header,
+		// should read it all. now it's wrong.
+		h.PackField = r.TryReadByte()
+	}
-		// Program packet sequence counter
-		if h.HasProgramPacketSequenceCounter {
-			var bs []byte
-			if bs, err = i.NextBytesNoCopy(2); err != nil {
-				err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
-				return
-			}
-			h.PacketSequenceCounter = uint8(bs[0]) & 0x7f
-			h.MPEG1OrMPEG2ID = uint8(bs[1]) >> 6 & 0x1
-			h.OriginalStuffingLength = uint8(bs[1]) & 0x3f
-		}
+	if h.HasProgramPacketSequenceCounter {
+		_ = r.TryReadBool() // marker_bit.
+		h.PacketSequenceCounter = uint8(r.TryReadBits(7))
-		// P-STD buffer
-		if h.HasPSTDBuffer {
-			var bs []byte
-			if bs, err = i.NextBytesNoCopy(2); err != nil {
-				err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
-				return
-			}
-			h.PSTDBufferScale = bs[0] >> 5 & 0x1
-			h.PSTDBufferSize = uint16(bs[0])&0x1f<<8 | uint16(bs[1])
-		}
+		_ = r.TryReadBool() // marker_bit.
+		h.MPEG1OrMPEG2ID = r.TryReadBool()
+		h.OriginalStuffingLength = uint8(r.TryReadBits(6))
+	}
-		// Extension 2
-		if h.HasExtension2 {
-			// Length
-			if b, err = i.NextByte(); err != nil {
-				err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
-				return
-			}
-			h.Extension2Length = uint8(b) & 0x7f
-
-			// Data
-			if h.Extension2Data, err = i.NextBytes(int(h.Extension2Length)); err != nil {
-				err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
-				return
-			}
-		}
+	if h.HasPSTDBuffer {
+		_ = r.TryReadBits(2) // Reserved.
+		h.PSTDBufferScale = r.TryReadBool()
+		h.PSTDBufferSize = uint16(r.TryReadBits(13))
 	}
-	return
-}
-// parseDSMTrickMode parses a DSM trick mode
-func parseDSMTrickMode(i byte) (m *DSMTrickMode) {
-	m = &DSMTrickMode{}
-	m.TrickModeControl = i >> 5
-	if m.TrickModeControl == TrickModeControlFastForward || m.TrickModeControl == TrickModeControlFastReverse {
-		m.FieldID = i >> 3 & 0x3
-		m.IntraSliceRefresh = i >> 2 & 0x1
-		m.FrequencyTruncation = i & 0x3
-	} else if m.TrickModeControl == TrickModeControlFreezeFrame {
-		m.FieldID = i >> 3 & 0x3
-	} else if m.TrickModeControl == TrickModeControlSlowMotion || m.TrickModeControl == TrickModeControlSlowReverse {
-		m.RepeatControl = i & 0x1f
+	if h.HasExtension2 {
+		_ = r.TryReadBool() // Reserved.
+		h.Extension2Length = uint8(r.TryReadBits(7))
+
+		h.Extension2Data = make([]byte, h.Extension2Length)
+		TryReadFull(r, h.Extension2Data)
 	}
-	return
+	return h, dataStart, r.TryError
 }
-// parsePTSOrDTS parses a PTS or a DTS
-func parsePTSOrDTS(i *astikit.BytesIterator) (cr *ClockReference, err error) {
-	var bs []byte
-	if bs, err = i.NextBytesNoCopy(5); err != nil {
-		err = fmt.Errorf("astits: fetching next bytes failed: %w", err)
-		return
+// parseDSMTrickMode parses a DSM trick mode.
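// Aside (editorial sketch, not part of the patch): DSM_trick_mode_control is a
// single byte, a 3-bit trick_mode_control followed by 5 bits whose meaning
// depends on that control value (field_id, intra_slice_refresh and
// frequency_truncation for the fast modes, field_id plus 3 reserved bits for
// freeze frame, rep_cntrl for the slow modes, 5 reserved bits otherwise).
// Decoding one such byte with the same bitio calls used below, taking the bit
// pattern "001" + "10101" from the slow_motion test case later in this patch,
// and assuming the bytes/fmt/bitio imports already present in this package:
//
//	r := bitio.NewCountReader(bytes.NewReader([]byte{0b00110101}))
//	control := uint8(r.TryReadBits(3)) // 1, i.e. TrickModeControlSlowMotion
//	repeat := uint8(r.TryReadBits(5))  // 0b10101
//	fmt.Println(control, repeat, r.TryError)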
+func parseDSMTrickMode(r *bitio.CountReader) (*DSMTrickMode, error) { + m := &DSMTrickMode{} + m.TrickModeControl = uint8(r.TryReadBits(3)) + + switch m.TrickModeControl { + case TrickModeControlFastForward, TrickModeControlFastReverse: + m.FieldID = uint8(r.TryReadBits(2)) + m.IntraSliceRefresh = r.TryReadBool() + m.FrequencyTruncation = uint8(r.TryReadBits(2)) + + case TrickModeControlFreezeFrame: + m.FieldID = uint8(r.TryReadBits(2)) + _ = r.TryReadBits(3) + + case TrickModeControlSlowMotion, TrickModeControlSlowReverse: + m.RepeatControl = uint8(r.TryReadBits(5)) + + default: + _ = uint8(r.TryReadBits(5)) } - cr = newClockReference(int64(uint64(bs[0])>>1&0x7<<30|uint64(bs[1])<<22|uint64(bs[2])>>1&0x7f<<15|uint64(bs[3])<<7|uint64(bs[4])>>1&0x7f), 0) - return + return m, r.TryError } -// parseESCR parses an ESCR -func parseESCR(i *astikit.BytesIterator) (cr *ClockReference, err error) { - var bs []byte - if bs, err = i.NextBytesNoCopy(6); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return +// readPTSOrDTS reads a PTS or a DTS. +func readPTSOrDTS(r *bitio.CountReader) (int64, error) { + buf := &bytes.Buffer{} + w := bitio.NewWriter(buf) + + w.TryWriteBits(r.TryReadBits(3), 3) // bits 32-30. + _ = r.TryReadBool() // Reserved. + w.TryWriteBits(r.TryReadBits(15), 15) // bits 27-15. + _ = r.TryReadBool() // Reserved. + w.TryWriteBits(r.TryReadBits(15), 15) // bits 27-15. + _ = r.TryReadBool() // Reserved. + + if r.TryError != nil { + return 0, fmt.Errorf("read: %w", r.TryError) + } + if w.TryError != nil { + return 0, fmt.Errorf("write: %w", w.TryError) } - escr := uint64(bs[0])>>3&0x7<<39 | uint64(bs[0])&0x3<<37 | uint64(bs[1])<<29 | uint64(bs[2])>>3<<24 | uint64(bs[2])&0x3<<22 | uint64(bs[3])<<14 | uint64(bs[4])>>3<<9 | uint64(bs[4])&0x3<<7 | uint64(bs[5])>>1 - cr = newClockReference(int64(escr>>9), int64(escr&0x1ff)) - return -} -// will count how many total bytes and payload bytes will be written when writePESData is called with the same arguments -// should be used by the caller of writePESData to determine AF stuffing size needed to be applied -// since the length of video PES packets are often zero, we can't just stuff it with 0xff-s at the end -func calcPESDataLength(h *PESHeader, payloadLeft []byte, isPayloadStart bool, bytesAvailable int) (totalBytes, payloadBytes int) { - totalBytes += pesHeaderLength - if isPayloadStart { - totalBytes += int(calcPESOptionalHeaderLength(h.OptionalHeader)) + if _, err := w.Align(); err != nil { + return 0, fmt.Errorf("align: %w", err) } - bytesAvailable -= totalBytes - if len(payloadLeft) < bytesAvailable { - payloadBytes = len(payloadLeft) - } else { - payloadBytes = bytesAvailable + base, err := bitio.NewReader(buf).ReadBits(33) + if err != nil { + return 0, fmt.Errorf("base: %w", w.TryError) } - return + return int64(base), nil +} + +// parsePTSOrDTS parses a PTS or a DTS. +func parsePTSOrDTS(r *bitio.CountReader) (*ClockReference, error) { + base, err := readPTSOrDTS(r) + return newClockReference(base, 0), err +} + +// parseESCR parses an ESCR. +func parseESCR(r *bitio.CountReader) (*ClockReference, error) { + r.TryReadBits(2) // Reserved + base, err := readPTSOrDTS(r) + if err != nil { + return nil, err + } + + ext := int64(r.TryReadBits(9)) + _ = r.TryReadBool() // Reserved. 
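// Aside (editorial sketch, not part of the patch): the 33-bit PTS/DTS base is
// stored as three groups of 3 + 15 + 15 bits, each followed by a marker bit,
// which is why readPTSOrDTS above re-packs the groups through a temporary
// bitio.Writer before reading them back as one 33-bit value. The same value
// can be reassembled with plain shifts on the reader alone; this is only an
// equivalent illustration of the layout, not what the patch does:
//
//	base := r.TryReadBits(3) << 30  // bits 32..30
//	_ = r.TryReadBool()             // marker bit
//	base |= r.TryReadBits(15) << 15 // bits 29..15
//	_ = r.TryReadBool()             // marker bit
//	base |= r.TryReadBits(15)       // bits 14..0
//	_ = r.TryReadBool()             // marker bit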
+ + return newClockReference(base, ext), r.TryError } -// first packet will contain PES header with optional PES header and payload, if possible -// all consequential packets will contain just payload -// for the last packet caller must add AF with stuffing, see calcPESDataLength -func writePESData(w *astikit.BitsWriter, h *PESHeader, payloadLeft []byte, isPayloadStart bool, bytesAvailable int) (totalBytesWritten, payloadBytesWritten int, err error) { +// writePESData first packet will contain PES header with +// optional PES header and payload, if possible all consequential +// packets will contain just payload for the last packet caller +// must add AF with stuffing, see calcPESDataLength. +func writePESData( + w *bitio.Writer, + h *PESHeader, + payloadLeft []byte, + isPayloadStart bool, + bytesAvailable int, +) (totalBytesWritten, payloadBytesWritten int, err error) { if isPayloadStart { var n int n, err = writePESHeader(w, h, len(payloadLeft)) if err != nil { + err = fmt.Errorf("writing PES header failed: %w", err) return } totalBytesWritten += n @@ -462,8 +408,9 @@ func writePESData(w *astikit.BitsWriter, h *PESHeader, payloadLeft []byte, isPay payloadBytesWritten = len(payloadLeft) } - err = w.Write(payloadLeft[:payloadBytesWritten]) + _, err = w.Write(payloadLeft[:payloadBytesWritten]) if err != nil { + err = fmt.Errorf("writing payload failed: %w", err) return } @@ -471,11 +418,9 @@ func writePESData(w *astikit.BitsWriter, h *PESHeader, payloadLeft []byte, isPay return } -func writePESHeader(w *astikit.BitsWriter, h *PESHeader, payloadSize int) (int, error) { - b := astikit.NewBitsWriterBatch(w) - - b.WriteN(uint32(0x000001), 24) // packet_start_code_prefix - b.Write(h.StreamID) +func writePESHeader(w *bitio.Writer, h *PESHeader, payloadSize int) (int, error) { + w.TryWriteBits(0x000001, 24) // packet_start_code_prefix + w.TryWriteByte(h.StreamID) pesPacketLength := 0 @@ -489,19 +434,19 @@ func writePESHeader(w *astikit.BitsWriter, h *PESHeader, payloadSize int) (int, } } - b.Write(uint16(pesPacketLength)) + w.TryWriteBits(uint64(pesPacketLength), 16) bytesWritten := pesHeaderLength if hasPESOptionalHeader(h.StreamID) { - n, err := writePESOptionalHeader(w, h.OptionalHeader) + n, err := writePESOptionalHeader(bitio.NewWriter(w), h.OptionalHeader) if err != nil { - return 0, err + return 0, fmt.Errorf("writing optional header failed: %w", err) } bytesWritten += n } - return bytesWritten, b.Err() + return bytesWritten, w.TryError } func calcPESOptionalHeaderLength(h *PESOptionalHeader) uint8 { @@ -511,7 +456,8 @@ func calcPESOptionalHeaderLength(h *PESOptionalHeader) uint8 { return 3 + calcPESOptionalHeaderDataLength(h) } -func calcPESOptionalHeaderDataLength(h *PESOptionalHeader) (length uint8) { +func calcPESOptionalHeaderDataLength(h *PESOptionalHeader) uint8 { + var length uint8 if h.PTSDTSIndicator == PTSDTSIndicatorOnlyPTS { length += ptsOrDTSByteLength } else if h.PTSDTSIndicator == PTSDTSIndicatorBothPresent { @@ -534,9 +480,9 @@ func calcPESOptionalHeaderDataLength(h *PESOptionalHeader) (length uint8) { length++ } - if h.HasCRC { - //length += 4 // TODO - } + /*if h.HasCRC { + // length += 4 // TODO + }*/ if h.HasExtension { length++ @@ -545,9 +491,9 @@ func calcPESOptionalHeaderDataLength(h *PESOptionalHeader) (length uint8) { length += 16 } - if h.HasPackHeaderField { + /*if h.HasPackHeaderField { // TODO - } + }*/ if h.HasProgramPacketSequenceCounter { length += 2 @@ -562,41 +508,39 @@ func calcPESOptionalHeaderDataLength(h *PESOptionalHeader) (length uint8) { } } - 
return + return length } -func writePESOptionalHeader(w *astikit.BitsWriter, h *PESOptionalHeader) (int, error) { +func writePESOptionalHeader(w *bitio.Writer, h *PESOptionalHeader) (int, error) { //nolint:funlen if h == nil { return 0, nil } - b := astikit.NewBitsWriterBatch(w) - - b.WriteN(uint8(0b10), 2) // marker bits - b.WriteN(h.ScramblingControl, 2) - b.Write(h.Priority) - b.Write(h.DataAlignmentIndicator) - b.Write(h.IsCopyrighted) - b.Write(h.IsOriginal) + w.TryWriteBits(0b10, 2) // Marker bits. + w.TryWriteBits(uint64(h.ScramblingControl), 2) + w.TryWriteBool(h.Priority) + w.TryWriteBool(h.DataAlignmentIndicator) + w.TryWriteBool(h.IsCopyrighted) + w.TryWriteBool(h.IsOriginal) - b.WriteN(h.PTSDTSIndicator, 2) - b.Write(h.HasESCR) - b.Write(h.HasESRate) - b.Write(h.HasDSMTrickMode) - b.Write(h.HasAdditionalCopyInfo) - b.Write(false) // CRC of previous PES packet. not supported yet - //b.Write(h.HasCRC) - b.Write(h.HasExtension) + w.TryWriteBits(uint64(h.PTSDTSIndicator), 2) + w.TryWriteBool(h.HasESCR) + w.TryWriteBool(h.HasESRate) + w.TryWriteBool(h.HasDSMTrickMode) + w.TryWriteBool(h.HasAdditionalCopyInfo) + w.TryWriteBool(false) // CRC of previous PES packet. not supported yet + // b.Write(h.HasCRC) + w.TryWriteBool(h.HasExtension) pesOptionalHeaderDataLength := calcPESOptionalHeaderDataLength(h) - b.Write(pesOptionalHeaderDataLength) + w.TryWriteByte(pesOptionalHeaderDataLength) bytesWritten := 3 if h.PTSDTSIndicator == PTSDTSIndicatorOnlyPTS { n, err := writePTSOrDTS(w, 0b0010, h.PTS) if err != nil { - return 0, err + return 0, fmt.Errorf("PTS only: %w", err) } bytesWritten += n } @@ -604,13 +548,13 @@ func writePESOptionalHeader(w *astikit.BitsWriter, h *PESOptionalHeader) (int, e if h.PTSDTSIndicator == PTSDTSIndicatorBothPresent { n, err := writePTSOrDTS(w, 0b0011, h.PTS) if err != nil { - return 0, err + return 0, fmt.Errorf("PTS: %w", err) } bytesWritten += n n, err = writePTSOrDTS(w, 0b0001, h.DTS) if err != nil { - return 0, err + return 0, fmt.Errorf("DTS: %w", err) } bytesWritten += n } @@ -618,130 +562,131 @@ func writePESOptionalHeader(w *astikit.BitsWriter, h *PESOptionalHeader) (int, e if h.HasESCR { n, err := writeESCR(w, h.ESCR) if err != nil { - return 0, err + return 0, fmt.Errorf("ESCR: %w", err) } bytesWritten += n } if h.HasESRate { - b.Write(true) - b.WriteN(h.ESRate, 22) - b.Write(true) + w.TryWriteBool(true) + w.TryWriteBits(uint64(h.ESRate), 22) + w.TryWriteBool(true) bytesWritten += 3 } if h.HasDSMTrickMode { n, err := writeDSMTrickMode(w, h.DSMTrickMode) if err != nil { - return 0, err + return 0, fmt.Errorf("DMS trick mode: %w", err) } bytesWritten += n } if h.HasAdditionalCopyInfo { - b.Write(true) // marker_bit - b.WriteN(h.AdditionalCopyInfo, 7) + w.TryWriteBool(true) // marker_bit + w.TryWriteBits(uint64(h.AdditionalCopyInfo), 7) bytesWritten++ } - if h.HasCRC { + /*if h.HasCRC { // TODO, not supported - } + }*/ if h.HasExtension { - // exp 10110001 - // act 10111111 - b.Write(h.HasPrivateData) - b.Write(false) // TODO pack_header_field_flag, not implemented - //b.Write(h.HasPackHeaderField) - b.Write(h.HasProgramPacketSequenceCounter) - b.Write(h.HasPSTDBuffer) - b.WriteN(uint8(0xff), 3) // reserved - b.Write(h.HasExtension2) - bytesWritten++ + writePESExtension(w, h, &bytesWritten) + } - if h.HasPrivateData { - b.WriteBytesN(h.PrivateData, 16, 0) - bytesWritten += 16 - } + return bytesWritten, w.TryError +} - if h.HasPackHeaderField { - // TODO (see parsePESOptionalHeader) - } +func writePESExtension(w *bitio.Writer, h *PESOptionalHeader, 
bytesWritten *int) { + w.TryWriteBool(h.HasPrivateData) + w.TryWriteBool(false) // TODO pack_header_field_flag, not implemented + // b.Write(h.HasPackHeaderField) + w.TryWriteBool(h.HasProgramPacketSequenceCounter) + w.TryWriteBool(h.HasPSTDBuffer) + w.TryWriteBits(0xff, 3) // reserved + w.TryWriteBool(h.HasExtension2) + *bytesWritten++ - if h.HasProgramPacketSequenceCounter { - b.Write(true) // marker_bit - b.WriteN(h.PacketSequenceCounter, 7) - b.Write(true) // marker_bit - b.WriteN(h.MPEG1OrMPEG2ID, 1) - b.WriteN(h.OriginalStuffingLength, 6) - bytesWritten += 2 - } + if h.HasPrivateData { + w.TryWrite(h.PrivateData) + *bytesWritten += 16 + } - if h.HasPSTDBuffer { - b.WriteN(uint8(0b01), 2) - b.WriteN(h.PSTDBufferScale, 1) - b.WriteN(h.PSTDBufferSize, 13) - bytesWritten += 2 - } + /*if h.HasPackHeaderField { + // TODO (see parsePESOptionalHeader) + }*/ - if h.HasExtension2 { - b.Write(true) // marker_bit - b.WriteN(uint8(len(h.Extension2Data)), 7) - b.Write(h.Extension2Data) - bytesWritten += 1 + len(h.Extension2Data) - } + if h.HasProgramPacketSequenceCounter { + w.TryWriteBool(true) // marker_bit + w.TryWriteBits(uint64(h.PacketSequenceCounter), 7) + w.TryWriteBool(true) // marker_bit + w.TryWriteBool(h.MPEG1OrMPEG2ID) + w.TryWriteBits(uint64(h.OriginalStuffingLength), 6) + *bytesWritten += 2 } - return bytesWritten, b.Err() -} - -func writeDSMTrickMode(w *astikit.BitsWriter, m *DSMTrickMode) (int, error) { - b := astikit.NewBitsWriterBatch(w) - - b.WriteN(m.TrickModeControl, 3) - if m.TrickModeControl == TrickModeControlFastForward || m.TrickModeControl == TrickModeControlFastReverse { - b.WriteN(m.FieldID, 2) - b.Write(m.IntraSliceRefresh == 1) // it should be boolean - b.WriteN(m.FrequencyTruncation, 2) - } else if m.TrickModeControl == TrickModeControlFreezeFrame { - b.WriteN(m.FieldID, 2) - b.WriteN(uint8(0xff), 3) // reserved - } else if m.TrickModeControl == TrickModeControlSlowMotion || m.TrickModeControl == TrickModeControlSlowReverse { - b.WriteN(m.RepeatControl, 5) - } else { - b.WriteN(uint8(0xff), 5) // reserved + if h.HasPSTDBuffer { + w.TryWriteBits(0b01, 2) + w.TryWriteBool(h.PSTDBufferScale) + w.TryWriteBits(uint64(h.PSTDBufferSize), 13) + *bytesWritten += 2 } - return dsmTrickModeLength, b.Err() + if h.HasExtension2 { + w.TryWriteBool(true) // marker_bit + w.TryWriteBits(uint64(len(h.Extension2Data)), 7) + w.TryWrite(h.Extension2Data) + *bytesWritten += 1 + len(h.Extension2Data) + } } -func writeESCR(w *astikit.BitsWriter, cr *ClockReference) (int, error) { - b := astikit.NewBitsWriterBatch(w) +func writeDSMTrickMode(w *bitio.Writer, m *DSMTrickMode) (int, error) { + w.TryWriteBits(uint64(m.TrickModeControl), 3) + + switch m.TrickModeControl { + case TrickModeControlFastForward, TrickModeControlFastReverse: + w.TryWriteBits(uint64(m.FieldID), 2) + w.TryWriteBool(m.IntraSliceRefresh) + w.TryWriteBits(uint64(m.FrequencyTruncation), 2) - b.WriteN(uint8(0xff), 2) - b.WriteN(uint64(cr.Base>>30), 3) - b.Write(true) - b.WriteN(uint64(cr.Base>>15), 15) - b.Write(true) - b.WriteN(uint64(cr.Base), 15) - b.Write(true) - b.WriteN(uint64(cr.Extension), 9) - b.Write(true) + case TrickModeControlFreezeFrame: + w.TryWriteBits(uint64(m.FieldID), 2) + w.TryWriteBits(0xff, 3) - return escrLength, b.Err() + case TrickModeControlSlowMotion, TrickModeControlSlowReverse: + w.TryWriteBits(uint64(m.RepeatControl), 5) + + default: + w.TryWriteBits(0xff, 5) + } + + return dsmTrickModeLength, w.TryError } -func writePTSOrDTS(w *astikit.BitsWriter, flag uint8, cr *ClockReference) (bytesWritten 
int, retErr error) { - b := astikit.NewBitsWriterBatch(w) +func writeESCR(w *bitio.Writer, cr *ClockReference) (int, error) { + w.TryWriteBits(0xff, 2) + w.TryWriteBits(uint64(cr.Base>>30), 3) + w.TryWriteBool(true) + w.TryWriteBits(uint64(cr.Base>>15), 15) + w.TryWriteBool(true) + w.TryWriteBits(uint64(cr.Base), 15) + w.TryWriteBool(true) + w.TryWriteBits(uint64(cr.Extension), 9) + w.TryWriteBool(true) + + return escrLength, w.TryError +} - b.WriteN(flag, 4) - b.WriteN(uint64(cr.Base>>30), 3) - b.Write(true) - b.WriteN(uint64(cr.Base>>15), 15) - b.Write(true) - b.WriteN(uint64(cr.Base), 15) - b.Write(true) +func writePTSOrDTS(w *bitio.Writer, flag uint8, cr *ClockReference) (bytesWritten int, retErr error) { + w.TryWriteBits(uint64(flag), 4) + w.TryWriteBits(uint64(cr.Base>>30), 3) + w.TryWriteBool(true) + w.TryWriteBits(uint64(cr.Base>>15), 15) + w.TryWriteBool(true) + w.TryWriteBits(uint64(cr.Base), 15) + w.TryWriteBool(true) - return ptsOrDTSByteLength, b.Err() + return ptsOrDTSByteLength, w.TryError } diff --git a/data_pes_test.go b/data_pes_test.go index 4b32c71..2385396 100644 --- a/data_pes_test.go +++ b/data_pes_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -25,39 +25,39 @@ var dsmTrickModeSlow = &DSMTrickMode{ func dsmTrickModeSlowBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write("001") // Control - w.Write("10101") // Repeat control + w := bitio.NewWriter(buf) + WriteBinary(w, "001") // Control + WriteBinary(w, "10101") // Repeat control return buf.Bytes() } type dsmTrickModeTestCase struct { name string - bytesFunc func(w *astikit.BitsWriter) + bytesFunc func(w *bitio.Writer) trickMode *DSMTrickMode } var dsmTrickModeTestCases = []dsmTrickModeTestCase{ { "fast_forward", - func(w *astikit.BitsWriter) { - w.Write("000") // Control - w.Write("10") // Field ID - w.Write("1") // Intra slice refresh - w.Write("11") // Frequency truncation + func(w *bitio.Writer) { + WriteBinary(w, "000") // Control + WriteBinary(w, "10") // Field ID + WriteBinary(w, "1") // Intra slice refresh + WriteBinary(w, "11") // Frequency truncation }, &DSMTrickMode{ FieldID: 2, FrequencyTruncation: 3, - IntraSliceRefresh: 1, + IntraSliceRefresh: true, TrickModeControl: TrickModeControlFastForward, }, }, { "slow_motion", - func(w *astikit.BitsWriter) { - w.Write("001") - w.Write("10101") + func(w *bitio.Writer) { + WriteBinary(w, "001") + WriteBinary(w, "10101") }, &DSMTrickMode{ RepeatControl: 0b10101, @@ -66,10 +66,10 @@ var dsmTrickModeTestCases = []dsmTrickModeTestCase{ }, { "freeze_frame", - func(w *astikit.BitsWriter) { - w.Write("010") // Control - w.Write("10") // Field ID - w.Write("111") // Reserved + func(w *bitio.Writer) { + WriteBinary(w, "010") // Control + WriteBinary(w, "10") // Field ID + WriteBinary(w, "111") // Reserved }, &DSMTrickMode{ FieldID: 2, @@ -78,24 +78,24 @@ var dsmTrickModeTestCases = []dsmTrickModeTestCase{ }, { "fast_reverse", - func(w *astikit.BitsWriter) { - w.Write("011") // Control - w.Write("10") // Field ID - w.Write("1") // Intra slice refresh - w.Write("11") // Frequency truncation + func(w *bitio.Writer) { + WriteBinary(w, "011") // Control + WriteBinary(w, "10") // Field ID + WriteBinary(w, "1") // Intra slice refresh + WriteBinary(w, "11") // Frequency truncation }, &DSMTrickMode{ FieldID: 2, FrequencyTruncation: 3, - IntraSliceRefresh: 1, + IntraSliceRefresh: true, TrickModeControl: 
TrickModeControlFastReverse, }, }, { "slow_reverse", - func(w *astikit.BitsWriter) { - w.Write("100") - w.Write("01010") + func(w *bitio.Writer) { + WriteBinary(w, "100") + WriteBinary(w, "01010") }, &DSMTrickMode{ RepeatControl: 0b01010, @@ -104,9 +104,9 @@ var dsmTrickModeTestCases = []dsmTrickModeTestCase{ }, { "reserved", - func(w *astikit.BitsWriter) { - w.Write("101") - w.Write("11111") + func(w *bitio.Writer) { + WriteBinary(w, "101") + WriteBinary(w, "11111") }, &DSMTrickMode{ TrickModeControl: 5, // reserved @@ -118,9 +118,12 @@ func TestParseDSMTrickMode(t *testing.T) { for _, tc := range dsmTrickModeTestCases { t.Run(tc.name, func(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) tc.bytesFunc(w) - assert.Equal(t, parseDSMTrickMode(buf.Bytes()[0]), tc.trickMode) + r := bitio.NewCountReader(bytes.NewReader(buf.Bytes())) + trickMode, err := parseDSMTrickMode(r) + assert.NoError(t, err) + assert.Equal(t, trickMode, tc.trickMode) }) } } @@ -129,11 +132,11 @@ func TestWriteDSMTrickMode(t *testing.T) { for _, tc := range dsmTrickModeTestCases { t.Run(tc.name, func(t *testing.T) { bufExpected := &bytes.Buffer{} - wExpected := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: bufExpected}) + wExpected := bitio.NewWriter(bufExpected) tc.bytesFunc(wExpected) bufActual := &bytes.Buffer{} - wActual := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: bufActual}) + wActual := bitio.NewWriter(bufActual) n, err := writeDSMTrickMode(wActual, tc.trickMode) assert.NoError(t, err) @@ -148,14 +151,14 @@ var ptsClockReference = &ClockReference{Base: 5726623061} func ptsBytes(flag string) []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(flag) // Flag - w.Write("101") // 32...30 - w.Write("1") // Dummy - w.Write("010101010101010") // 29...15 - w.Write("1") // Dummy - w.Write("101010101010101") // 14...0 - w.Write("1") // Dummy + w := bitio.NewWriter(buf) + WriteBinary(w, flag) // Flag + WriteBinary(w, "101") // 32...30 + WriteBinary(w, "1") // Dummy + WriteBinary(w, "010101010101010") // 29...15 + WriteBinary(w, "1") // Dummy + WriteBinary(w, "101010101010101") // 14...0 + WriteBinary(w, "1") // Dummy return buf.Bytes() } @@ -163,26 +166,29 @@ var dtsClockReference = &ClockReference{Base: 5726623060} func dtsBytes(flag string) []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(flag) // Flag - w.Write("101") // 32...30 - w.Write("1") // Dummy - w.Write("010101010101010") // 29...15 - w.Write("1") // Dummy - w.Write("101010101010100") // 14...0 - w.Write("1") // Dummy + w := bitio.NewWriter(buf) + WriteBinary(w, flag) // Flag + WriteBinary(w, "101") // 32...30 + WriteBinary(w, "1") // Dummy + WriteBinary(w, "010101010101010") // 29...15 + WriteBinary(w, "1") // Dummy + WriteBinary(w, "101010101010100") // 14...0 + WriteBinary(w, "1") // Dummy return buf.Bytes() } func TestParsePTSOrDTS(t *testing.T) { - v, err := parsePTSOrDTS(astikit.NewBytesIterator(ptsBytes("0010"))) + r := bitio.NewCountReader(bytes.NewReader(ptsBytes("0010"))) + _, err := r.ReadBits(4) + assert.NoError(t, err) + v, err := parsePTSOrDTS(r) assert.Equal(t, v, ptsClockReference) assert.NoError(t, err) } func TestWritePTSOrDTS(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) n, err := writePTSOrDTS(w, uint8(0b0010), dtsClockReference) 
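// Aside (editorial note): the expected base 5726623061 in ptsClockReference is
// simply the three marker-separated groups written by ptsBytes reassembled:
//
//	0b101<<30 + 0b010101010101010<<15 + 0b101010101010101
//	= 5368709120 + 357892096 + 21845
//	= 5726623061
//
// dtsBytes flips the lowest bit of the last group, giving 5726623060.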
assert.NoError(t, err) assert.Equal(t, n, 5) @@ -192,28 +198,29 @@ func TestWritePTSOrDTS(t *testing.T) { func escrBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write("11") // Dummy - w.Write("011") // 32...30 - w.Write("1") // Dummy - w.Write("000010111110000") // 29...15 - w.Write("1") // Dummy - w.Write("000010111001111") // 14...0 - w.Write("1") // Dummy - w.Write("000111010") // Ext - w.Write("1") // Dummy + w := bitio.NewWriter(buf) + WriteBinary(w, "11") // Dummy + WriteBinary(w, "011") // 32...30 + WriteBinary(w, "1") // Dummy + WriteBinary(w, "000010111110000") // 29...15 + WriteBinary(w, "1") // Dummy + WriteBinary(w, "000010111001111") // 14...0 + WriteBinary(w, "1") // Dummy + WriteBinary(w, "000111010") // Ext + WriteBinary(w, "1") // Dummy return buf.Bytes() } func TestParseESCR(t *testing.T) { - v, err := parseESCR(astikit.NewBytesIterator(escrBytes())) + r := bitio.NewCountReader(bytes.NewReader(escrBytes())) + v, err := parseESCR(r) assert.Equal(t, v, clockReference) assert.NoError(t, err) } func TestWriteESCR(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) n, err := writeESCR(w, clockReference) assert.NoError(t, err) assert.Equal(t, n, 6) @@ -223,24 +230,24 @@ func TestWriteESCR(t *testing.T) { type pesTestCase struct { name string - headerBytesFunc func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) - optionalHeaderBytesFunc func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) - bytesFunc func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) + headerBytesFunc func(w *bitio.Writer, withStuffing bool, withCRC bool) + optionalHeaderBytesFunc func(w *bitio.Writer, withStuffing bool, withCRC bool) + bytesFunc func(w *bitio.Writer, withStuffing bool, withCRC bool) pesData *PESData } var pesTestCases = []pesTestCase{ { "without_header", - func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) { - w.Write("000000000000000000000001") // Prefix - w.Write(uint8(StreamIDPaddingStream)) // Stream ID - w.Write(uint16(4)) // Packet length + func(w *bitio.Writer, withStuffing bool, withCRC bool) { + WriteBinary(w, "000000000000000000000001") // Prefix + w.WriteByte(StreamIDPaddingStream) // Stream ID + w.WriteBits(4, 16) // Packet length }, - func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) { + func(w *bitio.Writer, withStuffing bool, withCRC bool) { // do nothing here }, - func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) { + func(w *bitio.Writer, withStuffing bool, withCRC bool) { w.Write([]byte("data")) // Data }, &PESData{ @@ -253,7 +260,7 @@ var pesTestCases = []pesTestCase{ }, { "with_header", - func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) { + func(w *bitio.Writer, withStuffing bool, withCRC bool) { packetLength := 67 stuffing := []byte("stuff") @@ -265,12 +272,11 @@ var pesTestCases = []pesTestCase{ packetLength -= 2 } - w.Write("000000000000000000000001") // Prefix - w.Write(uint8(1)) // Stream ID - w.Write(uint16(packetLength)) // Packet length - + WriteBinary(w, "000000000000000000000001") // Prefix + w.WriteByte(1) // Stream ID + w.WriteBits(uint64(packetLength), 16) // Packet length }, - func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) { + func(w *bitio.Writer, withStuffing bool, withCRC bool) { optionalHeaderLength := 60 stuffing := []byte("stuff") @@ -282,47 +288,47 @@ var pesTestCases = []pesTestCase{ optionalHeaderLength -= 2 } - 
w.Write("10") // Marker bits - w.Write("01") // Scrambling control - w.Write("1") // Priority - w.Write("1") // Data alignment indicator - w.Write("1") // Copyright - w.Write("1") // Original or copy - w.Write("11") // PTS/DTS indicator - w.Write("1") // ESCR flag - w.Write("1") // ES rate flag - w.Write("1") // DSM trick mode flag - w.Write("1") // Additional copy flag - w.Write(withCRC) // CRC flag - w.Write("1") // Extension flag - w.Write(uint8(optionalHeaderLength)) // Header length - w.Write(ptsBytes("0011")) // PTS - w.Write(dtsBytes("0001")) // DTS - w.Write(escrBytes()) // ESCR - w.Write("101010101010101010101011") // ES rate - w.Write(dsmTrickModeSlowBytes()) // DSM trick mode - w.Write("11111111") // Additional copy info + WriteBinary(w, "10") // Marker bits + WriteBinary(w, "01") // Scrambling control + WriteBinary(w, "1") // Priority + WriteBinary(w, "1") // Data alignment indicator + WriteBinary(w, "1") // Copyright + WriteBinary(w, "1") // Original or copy + WriteBinary(w, "11") // PTS/DTS indicator + WriteBinary(w, "1") // ESCR flag + WriteBinary(w, "1") // ES rate flag + WriteBinary(w, "1") // DSM trick mode flag + WriteBinary(w, "1") // Additional copy flag + w.WriteBool(withCRC) // CRC flag + WriteBinary(w, "1") // Extension flag + w.WriteByte(uint8(optionalHeaderLength)) // Header length + w.Write(ptsBytes("0011")) // PTS + w.Write(dtsBytes("0001")) // DTS + w.Write(escrBytes()) // ESCR + WriteBinary(w, "101010101010101010101011") // ES rate + w.Write(dsmTrickModeSlowBytes()) // DSM trick mode + WriteBinary(w, "11111111") // Additional copy info if withCRC { - w.Write(uint16(4)) // CRC + w.WriteBits(4, 16) // CRC } // Extension starts here - w.Write("1") // Private data flag - w.Write("0") // Pack header field flag - w.Write("1") // Program packet sequence counter flag - w.Write("1") // PSTD buffer flag - w.Write("111") // Dummy - w.Write("1") // Extension 2 flag + WriteBinary(w, "1") // Private data flag + WriteBinary(w, "0") // Pack header field flag + WriteBinary(w, "1") // Program packet sequence counter flag + WriteBinary(w, "1") // PSTD buffer flag + WriteBinary(w, "111") // Dummy + WriteBinary(w, "1") // Extension 2 flag w.Write([]byte("1234567890123456")) // Private data - //w.Write(uint8(5)) // Pack field - w.Write("1101010111010101") // Packet sequence counter - w.Write("0111010101010101") // PSTD buffer - w.Write("10001010") // Extension 2 header - w.Write([]byte("extension2")) // Extension 2 data + // w.WriteByte(uint8(5)) // Pack field + WriteBinary(w, "1101010111010101") // Packet sequence counter + WriteBinary(w, "0111010101010101") // PSTD buffer + WriteBinary(w, "10001010") // Extension 2 header + w.Write([]byte("extension2")) // Extension 2 data if withStuffing { w.Write(stuffing) // Optional header stuffing bytes } }, - func(w *astikit.BitsWriter, withStuffing bool, withCRC bool) { + func(w *bitio.Writer, withStuffing bool, withCRC bool) { stuffing := []byte("stuff") w.Write([]byte("data")) // Data if withStuffing { @@ -357,13 +363,13 @@ var pesTestCases = []pesTestCase{ IsCopyrighted: true, IsOriginal: true, MarkerBits: 2, - MPEG1OrMPEG2ID: 1, + MPEG1OrMPEG2ID: true, OriginalStuffingLength: 21, PacketSequenceCounter: 85, - //PackField: 5, + // PackField: 5, Priority: true, PrivateData: []byte("1234567890123456"), - PSTDBufferScale: 1, + PSTDBufferScale: true, PSTDBufferSize: 5461, PTSDTSIndicator: 3, PTS: ptsClockReference, @@ -379,7 +385,7 @@ var pesTestCases = []pesTestCase{ // used by TestParseData func pesWithHeaderBytes() []byte { buf := 
bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) pesTestCases[1].headerBytesFunc(w, true, true) pesTestCases[1].optionalHeaderBytesFunc(w, true, true) pesTestCases[1].bytesFunc(w, true, true) @@ -395,11 +401,12 @@ func TestParsePESData(t *testing.T) { for _, tc := range pesTestCases { t.Run(tc.name, func(t *testing.T) { buf := bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) tc.headerBytesFunc(w, true, true) tc.optionalHeaderBytesFunc(w, true, true) tc.bytesFunc(w, true, true) - d, err := parsePESData(astikit.NewBytesIterator(buf.Bytes())) + r := bitio.NewCountReader(bytes.NewReader(buf.Bytes())) + d, err := parsePESData(r, int64(len(buf.Bytes())*8)) assert.NoError(t, err) assert.Equal(t, tc.pesData, d) }) @@ -410,17 +417,17 @@ func TestWritePESData(t *testing.T) { for _, tc := range pesTestCases { t.Run(tc.name, func(t *testing.T) { bufExpected := bytes.Buffer{} - wExpected := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufExpected}) + wExpected := bitio.NewWriter(&bufExpected) tc.headerBytesFunc(wExpected, false, false) tc.optionalHeaderBytesFunc(wExpected, false, false) tc.bytesFunc(wExpected, false, false) bufActual := bytes.Buffer{} - wActual := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufActual}) + wActual := bitio.NewWriter(&bufActual) start := true - totalBytes := 0 - payloadPos := 0 + var totalBytes int + var payloadPos int for payloadPos+1 < len(tc.pesData.Data) { n, payloadN, err := writePESData( @@ -448,12 +455,12 @@ func TestWritePESHeader(t *testing.T) { for _, tc := range pesTestCases { t.Run(tc.name, func(t *testing.T) { bufExpected := bytes.Buffer{} - wExpected := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufExpected}) + wExpected := bitio.NewWriter(&bufExpected) tc.headerBytesFunc(wExpected, false, false) tc.optionalHeaderBytesFunc(wExpected, false, false) bufActual := bytes.Buffer{} - wActual := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufActual}) + wActual := bitio.NewWriter(&bufActual) n, err := writePESHeader(wActual, tc.pesData.Header, len(tc.pesData.Data)) assert.NoError(t, err) @@ -468,11 +475,11 @@ func TestWritePESOptionalHeader(t *testing.T) { for _, tc := range pesTestCases { t.Run(tc.name, func(t *testing.T) { bufExpected := bytes.Buffer{} - wExpected := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufExpected}) + wExpected := bitio.NewWriter(&bufExpected) tc.optionalHeaderBytesFunc(wExpected, false, false) bufActual := bytes.Buffer{} - wActual := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufActual}) + wActual := bitio.NewWriter(&bufActual) n, err := writePESOptionalHeader(wActual, tc.pesData.Header.OptionalHeader) assert.NoError(t, err) @@ -488,7 +495,7 @@ func BenchmarkParsePESData(b *testing.B) { for ti, tc := range pesTestCases { buf := bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) tc.headerBytesFunc(w, true, true) tc.optionalHeaderBytesFunc(w, true, true) tc.bytesFunc(w, true, true) @@ -499,7 +506,8 @@ func BenchmarkParsePESData(b *testing.B) { b.Run(tc.name, func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - parsePESData(astikit.NewBytesIterator(bss[ti])) + r := bitio.NewCountReader(bytes.NewReader(bss[ti])) + parsePESData(r, int64(len(bss[ti])*8)) } }) } diff --git a/data_pmt.go b/data_pmt.go index a153c97..6000109 100644 --- a/data_pmt.go +++ b/data_pmt.go 
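// Aside (editorial sketch, not part of the patch): with bitio.CountReader the
// parsers track positions in bits, so the length/offset arguments passed to
// parsePESData, parsePMTSection and friends are bit counts, not byte counts.
// A minimal usage sketch following the tests above, where buf holds a complete
// PES payload:
//
//	r := bitio.NewCountReader(bytes.NewReader(buf.Bytes()))
//	d, err := parsePESData(r, int64(len(buf.Bytes())*8))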
@@ -3,122 +3,119 @@ package astits import ( "fmt" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) +// StreamType . type StreamType uint8 -// Stream types +// Stream types. const ( - StreamTypeMPEG1Video StreamType = 0x01 - StreamTypeMPEG2Video StreamType = 0x02 - StreamTypeMPEG1Audio StreamType = 0x03 // ISO/IEC 11172-3 - StreamTypeMPEG2HalvedSampleRateAudio StreamType = 0x04 // ISO/IEC 13818-3 + StreamTypeMPEG1Video StreamType = 0x01 + StreamTypeMPEG2Video StreamType = 0x02 + + // ISO/IEC 11172-3. + StreamTypeMPEG1Audio StreamType = 0x03 + + // ISO/IEC 13818-3. + StreamTypeMPEG2HalvedSampleRateAudio StreamType = 0x04 StreamTypeMPEG2Audio StreamType = 0x04 StreamTypePrivateSection StreamType = 0x05 StreamTypePrivateData StreamType = 0x06 - StreamTypeMPEG2PacketizedData StreamType = 0x06 // Rec. ITU-T H.222 | ISO/IEC 13818-1 i.e., DVB subtitles/VBI and AC-3 - StreamTypeADTS StreamType = 0x0F // ISO/IEC 13818-7 Audio with ADTS transport syntax - StreamTypeAACAudio StreamType = 0x0f - StreamTypeMPEG4Video StreamType = 0x10 - StreamTypeAACLATMAudio StreamType = 0x11 - StreamTypeMetadata StreamType = 0x15 - StreamTypeH264Video StreamType = 0x1B // Rec. ITU-T H.264 | ISO/IEC 14496-10 - StreamTypeH265Video StreamType = 0x24 // Rec. ITU-T H.265 | ISO/IEC 23008-2 - StreamTypeHEVCVideo StreamType = 0x24 - StreamTypeCAVSVideo StreamType = 0x42 - StreamTypeVC1Video StreamType = 0xea - StreamTypeDIRACVideo StreamType = 0xd1 - StreamTypeAC3Audio StreamType = 0x81 - StreamTypeDTSAudio StreamType = 0x82 - StreamTypeTRUEHDAudio StreamType = 0x83 - StreamTypeSCTE35 StreamType = 0x86 - StreamTypeEAC3Audio StreamType = 0x87 + + // Rec. ITU-T H.222 | ISO/IEC 13818-1 i.e., DVB subtitles/VBI and AC-3. + StreamTypeMPEG2PacketizedData StreamType = 0x06 + + // ISO/IEC 13818-7 Audio with ADTS transport syntax. + StreamTypeADTS StreamType = 0x0F + StreamTypeAACAudio StreamType = 0x0f + StreamTypeMPEG4Video StreamType = 0x10 + StreamTypeAACLATMAudio StreamType = 0x11 + StreamTypeMetadata StreamType = 0x15 + + // Rec. ITU-T H.264 | ISO/IEC 14496-10. + StreamTypeH264Video StreamType = 0x1B + + // Rec. ITU-T H.265 | ISO/IEC 23008-2. + StreamTypeH265Video StreamType = 0x24 + StreamTypeHEVCVideo StreamType = 0x24 + StreamTypeCAVSVideo StreamType = 0x42 + StreamTypeVC1Video StreamType = 0xea + StreamTypeDIRACVideo StreamType = 0xd1 + StreamTypeAC3Audio StreamType = 0x81 + StreamTypeDTSAudio StreamType = 0x82 + StreamTypeTRUEHDAudio StreamType = 0x83 + StreamTypeSCTE35 StreamType = 0x86 + StreamTypeEAC3Audio StreamType = 0x87 ) -// PMTData represents a PMT data +// PMTData represents a PMT data. // https://en.wikipedia.org/wiki/Program-specific_information type PMTData struct { - ElementaryStreams []*PMTElementaryStream - PCRPID uint16 // The packet identifier that contains the program clock reference used to improve the random access accuracy of the stream's timing that is derived from the program timestamp. If this is unused. then it is set to 0x1FFF (all bits on). - ProgramDescriptors []*Descriptor // Program descriptors + ElementaryStreams []*PMTElementaryStream + + // PCRPID The packet identifier that contains the + // program clock reference used to improve the random + // access accuracy of the stream's timing that is + // derived from the program timestamp. If this is unused. + // then it is set to 0x1FFF (all bits on). 
+ PCRPID uint16 + + ProgramDescriptors []*Descriptor ProgramNumber uint16 } -// PMTElementaryStream represents a PMT elementary stream +// PMTElementaryStream represents a PMT elementary stream. type PMTElementaryStream struct { - ElementaryPID uint16 // The packet identifier that contains the stream type data. - ElementaryStreamDescriptors []*Descriptor // Elementary stream descriptors - StreamType StreamType // This defines the structure of the data contained within the elementary packet identifier. + // This defines the structure of the data contained + // within the elementary packet identifier. + StreamType StreamType + + // The packet identifier that contains the stream type data. 13 bits. + ElementaryPID uint16 + + // Elementary stream descriptors. + ElementaryStreamDescriptors []*Descriptor } -// parsePMTSection parses a PMT section -func parsePMTSection(i *astikit.BytesIterator, offsetSectionsEnd int, tableIDExtension uint16) (d *PMTData, err error) { - // Create data - d = &PMTData{ProgramNumber: tableIDExtension} +// parsePMTSection parses a PMT section. +func parsePMTSection( + r *bitio.CountReader, + offsetSectionsEnd int64, + tableIDExtension uint16, +) (*PMTData, error) { + d := &PMTData{ProgramNumber: tableIDExtension} - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + _ = r.TryReadBits(3) // Reserved. + d.PCRPID = uint16(r.TryReadBits(13)) - // PCR PID - d.PCRPID = uint16(bs[0]&0x1f)<<8 | uint16(bs[1]) + _ = r.TryReadBits(4) - // Program descriptors - if d.ProgramDescriptors, err = parseDescriptors(i); err != nil { - err = fmt.Errorf("astits: parsing descriptors failed: %w", err) - return + var err error + if d.ProgramDescriptors, err = parseDescriptors(r); err != nil { + return nil, fmt.Errorf("parsing program descriptors failed: %w", err) } - // Loop until end of section data is reached - for i.Offset() < offsetSectionsEnd { - // Create stream + // Loop until end of section data is reached. + for r.BitsCount < offsetSectionsEnd { e := &PMTElementaryStream{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Stream type - e.StreamType = StreamType(b) - - // Get next bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + typ := r.TryReadByte() + e.StreamType = StreamType(typ) - // Elementary PID - e.ElementaryPID = uint16(bs[0]&0x1f)<<8 | uint16(bs[1]) + _ = r.TryReadBits(3) // Reserved. 
+ e.ElementaryPID = uint16(r.TryReadBits(13)) + _ = r.TryReadBits(4) // Elementary descriptors - if e.ElementaryStreamDescriptors, err = parseDescriptors(i); err != nil { - err = fmt.Errorf("astits: parsing descriptors failed: %w", err) - return + if e.ElementaryStreamDescriptors, err = parseDescriptors(r); err != nil { + return nil, fmt.Errorf("parsing descriptors failed: %w", err) } // Add elementary stream d.ElementaryStreams = append(d.ElementaryStreams, e) } - return -} - -func calcPMTProgramInfoLength(d *PMTData) uint16 { - ret := uint16(2) // program_info_length - ret += calcDescriptorsLength(d.ProgramDescriptors) - - for _, es := range d.ElementaryStreams { - ret += 5 // stream_type, elementary_pid, es_info_length - ret += calcDescriptorsLength(es.ElementaryStreamDescriptors) - } - - return ret + return d, r.TryError } func calcPMTSectionLength(d *PMTData) uint16 { @@ -133,13 +130,11 @@ func calcPMTSectionLength(d *PMTData) uint16 { return ret } -func writePMTSection(w *astikit.BitsWriter, d *PMTData) (int, error) { - b := astikit.NewBitsWriterBatch(w) - - // TODO split into sections +func writePMTSection(w *bitio.Writer, d *PMTData) (int, error) { + // TODO split into sections. - b.WriteN(uint8(0xff), 3) - b.WriteN(d.PCRPID, 13) + w.TryWriteBits(0xff, 3) + w.TryWriteBits(uint64(d.PCRPID), 13) bytesWritten := 2 n, err := writeDescriptorsWithLength(w, d.ProgramDescriptors) @@ -149,9 +144,9 @@ func writePMTSection(w *astikit.BitsWriter, d *PMTData) (int, error) { bytesWritten += n for _, es := range d.ElementaryStreams { - b.Write(uint8(es.StreamType)) - b.WriteN(uint8(0xff), 3) - b.WriteN(es.ElementaryPID, 13) + w.TryWriteByte(uint8(es.StreamType)) + w.TryWriteBits(0xff, 3) + w.TryWriteBits(uint64(es.ElementaryPID), 13) bytesWritten += 3 n, err = writeDescriptorsWithLength(w, es.ElementaryStreamDescriptors) @@ -161,9 +156,10 @@ func writePMTSection(w *astikit.BitsWriter, d *PMTData) (int, error) { bytesWritten += n } - return bytesWritten, b.Err() + return bytesWritten, w.TryError } +// IsVideo . func (t StreamType) IsVideo() bool { switch t { case StreamTypeMPEG1Video, @@ -179,6 +175,7 @@ func (t StreamType) IsVideo() bool { return false } +// IsAudio . func (t StreamType) IsAudio() bool { switch t { case StreamTypeMPEG1Audio, @@ -194,7 +191,7 @@ func (t StreamType) IsAudio() bool { return false } -func (t StreamType) String() string { +func (t StreamType) String() string { //nolint:funlen switch t { case StreamTypeMPEG1Video: return "MPEG1 Video" @@ -240,6 +237,7 @@ func (t StreamType) String() string { return "Unknown" } +// ToPESStreamID . 
func (t StreamType) ToPESStreamID() uint8 { switch t { case StreamTypeMPEG1Video, StreamTypeMPEG2Video, StreamTypeMPEG4Video, StreamTypeH264Video, diff --git a/data_pmt_test.go b/data_pmt_test.go index 74d7a58..728fced 100644 --- a/data_pmt_test.go +++ b/data_pmt_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -21,29 +21,30 @@ var pmt = &PMTData{ func pmtBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write("111") // Reserved bits - w.Write("1010101010101") // PCR PID - w.Write("1111") // Reserved - descriptorsBytes(w) // Program descriptors - w.Write(uint8(StreamTypeMPEG1Audio)) // Stream #1 stream type - w.Write("111") // Stream #1 reserved - w.Write("0101010101010") // Stream #1 PID - w.Write("1111") // Stream #1 reserved - descriptorsBytes(w) // Stream #1 descriptors + w := bitio.NewWriter(buf) + WriteBinary(w, "111") // Reserved bits + WriteBinary(w, "1010101010101") // PCR PID + WriteBinary(w, "1111") // Reserved + descriptorsBytes(w) // Program descriptors + w.WriteByte(uint8(StreamTypeMPEG1Audio)) // Stream #1 stream type + WriteBinary(w, "111") // Stream #1 reserved + WriteBinary(w, "0101010101010") // Stream #1 PID + WriteBinary(w, "1111") // Stream #1 reserved + descriptorsBytes(w) // Stream #1 descriptors return buf.Bytes() } func TestParsePMTSection(t *testing.T) { - var b = pmtBytes() - d, err := parsePMTSection(astikit.NewBytesIterator(b), len(b), uint16(1)) + b := pmtBytes() + r := bitio.NewCountReader(bytes.NewReader(b)) + d, err := parsePMTSection(r, int64(len(b)*8), uint16(1)) assert.Equal(t, d, pmt) assert.NoError(t, err) } func TestWritePMTSection(t *testing.T) { buf := bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) n, err := writePMTSection(w, pmt) assert.NoError(t, err) assert.Equal(t, n, buf.Len()) @@ -55,7 +56,8 @@ func BenchmarkParsePMTSection(b *testing.B) { bs := pmtBytes() for i := 0; i < b.N; i++ { - parsePMTSection(astikit.NewBytesIterator(bs), len(bs), uint16(1)) + r := bitio.NewCountReader(bytes.NewReader(bs)) + parsePMTSection(r, int64(len(bs)), uint16(1)) } } @@ -64,7 +66,7 @@ func BenchmarkWritePMTSection(b *testing.B) { bw := &bytes.Buffer{} bw.Grow(1024) - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: bw}) + w := bitio.NewWriter(bw) for i := 0; i < b.N; i++ { bw.Reset() diff --git a/data_psi.go b/data_psi.go index e0c3e78..7b9bb68 100644 --- a/data_psi.go +++ b/data_psi.go @@ -1,12 +1,13 @@ package astits import ( + "errors" "fmt" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// PSI table IDs +// PSI table IDs. const ( PSITableTypeBAT = "BAT" PSITableTypeDIT = "DIT" @@ -24,8 +25,10 @@ const ( PSITableTypeUnknown = "Unknown" ) +// PSITableID . type PSITableID uint16 +// PSITableIDs. const ( PSITableIDPAT PSITableID = 0x00 PSITableIDPMT PSITableID = 0x02 @@ -46,45 +49,88 @@ const ( PSITableIDNITVariant2 PSITableID = 0x41 ) -// PSIData represents a PSI data +// PSIData represents a PSI data. // https://en.wikipedia.org/wiki/Program-specific_information type PSIData struct { - PointerField int // Present at the start of the TS packet payload signaled by the payload_unit_start_indicator bit in the TS header. Used to set packet alignment bytes or content before the start of tabled payload data. 
+ // PointerField it present at the start of the TS packet + // payload signaled by the payload_unit_start_indicator + // bit in the TS header. Used to set packet alignment + // bytes or content before the start of tabled payload data. + PointerField int Sections []*PSISection } -// PSISection represents a PSI section +// PSISection represents a PSI section. type PSISection struct { - CRC32 uint32 // A checksum of the entire table excluding the pointer field, pointer filler bytes and the trailing CRC32. + // CRC32 checksum of the entire table excluding the pointer + // field, pointer filler bytes and the trailing CRC32. + CRC32 uint32 Header *PSISectionHeader Syntax *PSISectionSyntax } -// PSISectionHeader represents a PSI section header +// PSISectionHeader represents a PSI section header. type PSISectionHeader struct { - PrivateBit bool // The PAT, PMT, and CAT all set this to 0. Other tables set this to 1. - SectionLength uint16 // The number of bytes that follow for the syntax section (with CRC value) and/or table data. These bytes must not exceed a value of 1021. - SectionSyntaxIndicator bool // A flag that indicates if the syntax section follows the section length. The PAT, PMT, and CAT all set this to 1. - TableID PSITableID // Table Identifier, that defines the structure of the syntax section and other contained data. As an exception, if this is the byte that immediately follow previous table section and is set to 0xFF, then it indicates that the repeat of table section end here and the rest of TS data payload shall be stuffed with 0xFF. Consequently the value 0xFF shall not be used for the Table Identifier. - TableType string + // PrivateBit The PAT, PMT, and CAT all set this to -1. + // Other tables set this to 1. + PrivateBit bool + + // SectionLength The number of bytes that follow for the + // syntax section (with CRC value) and/or table data. + // These bytes must not exceed a value of 1021. + SectionLength uint16 + + // A flag that indicates if the syntax section + // follows the section length. The PAT, PMT, + // and CAT all set this to 1. + SectionSyntaxIndicator bool + + // TableID that defines the structure of the syntax + // section and other contained data. As an exception, + // if this is the byte that immediately follow previous + // table section and is set to 0xFF, then it indicates + // that the repeat of table section end here and the rest of + // TS data payload shall be stuffed with 0xFF. Consequently + // the value 0xFF shall not be used for the Table Identifier. + TableID PSITableID + + TableType string } -// PSISectionSyntax represents a PSI section syntax +// PSISectionSyntax represents a PSI section syntax. type PSISectionSyntax struct { Data *PSISectionSyntaxData Header *PSISectionSyntaxHeader } -// PSISectionSyntaxHeader represents a PSI section syntax header +// PSISectionSyntaxHeader represents a PSI section syntax header. type PSISectionSyntaxHeader struct { - CurrentNextIndicator bool // Indicates if data is current in effect or is for future use. If the bit is flagged on, then the data is to be used at the present moment. - LastSectionNumber uint8 // This indicates which table is the last table in the sequence of tables. - SectionNumber uint8 // This is an index indicating which table this is in a related sequence of tables. The first table starts from 0. - TableIDExtension uint16 // Informational only identifier. The PAT uses this for the transport stream identifier and the PMT uses this for the Program number. 
- VersionNumber uint8 // Syntax version number. Incremented when data is changed and wrapped around on overflow for values greater than 32. + // TableIDExtension Informational only identifier. + // The PAT uses this for the transport stream identifier + // and the PMT uses this for the Program number. + TableIDExtension uint16 + + // VersionNumber Syntax version number. + // Incremented when data is changed and wrapped + // around on overflow for values greater than 32. + VersionNumber uint8 // 5 bits. + + // CurrentNextIndicator Indicates if data is current in + // effect or is for future use. If the bit is flagged on, + // then the data is to be used at the present moment. + CurrentNextIndicator bool + + // LastSectionNumber indicates which table is + // the last table in the sequence of tables. + LastSectionNumber uint8 + + // SectionNumber is an index indicating which table + // this is in a related sequence of tables. + // The first table starts from 0. + SectionNumber uint8 } -// PSISectionSyntaxData represents a PSI section syntax data +// PSISectionSyntaxData represents a PSI section syntax data. type PSISectionSyntaxData struct { EIT *EITData NIT *NITData @@ -94,168 +140,141 @@ type PSISectionSyntaxData struct { TOT *TOTData } -// parsePSIData parses a PSI data -func parsePSIData(i *astikit.BytesIterator) (d *PSIData, err error) { - // Init data - d = &PSIData{} +// parsePSIData parses a PSI data. +func parsePSIData(r *bitio.CountReader) (*PSIData, error) { + d := &PSIData{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + d.PointerField = int(r.TryReadByte()) - // Pointer field - d.PointerField = int(b) + // Pointer filler bytes. + skip := make([]byte, d.PointerField) + TryReadFull(r, skip) - // Pointer filler bytes - i.Skip(d.PointerField) - - // Parse sections var s *PSISection var stop bool - for i.HasBytesLeft() && !stop { - if s, stop, err = parsePSISection(i); err != nil { - err = fmt.Errorf("astits: parsing PSI table failed: %w", err) - return + var err error + for !stop { + if s, stop, err = parsePSISection(r); err != nil { + err = fmt.Errorf("parsing PSI table failed: %w", err) + return nil, err } d.Sections = append(d.Sections, s) } - return + return d, r.TryError } -// parsePSISection parses a PSI section -func parsePSISection(i *astikit.BytesIterator) (s *PSISection, stop bool, err error) { - // Init section - s = &PSISection{} +// ErrPSIInvalidCRC32 . +var ErrPSIInvalidCRC32 = errors.New("computed CRC32 doesn't match table CRC32") - // Parse header - var offsetStart, offsetSectionsEnd, offsetEnd int - if s.Header, offsetStart, _, offsetSectionsEnd, offsetEnd, err = parsePSISectionHeader(i); err != nil { - err = fmt.Errorf("astits: parsing PSI section header failed: %w", err) - return +// parsePSISection parses a PSI section. +func parsePSISection(i *bitio.CountReader) (*PSISection, bool, error) { + cr := NewCRC32Reader(i) + r := bitio.NewCountReader(cr) + r.BitsCount = i.BitsCount + + s := &PSISection{} + + header, offsetSectionsEnd, offsetEnd, err := parsePSISectionHeader(r) + if err != nil { + return nil, false, fmt.Errorf("parsing PSI section header failed: %w", err) } + s.Header = header - // Check whether we need to stop the parsing + // Check whether we need to stop the parsing. 
if shouldStopPSIParsing(s.Header.TableID) { - stop = true - return + return s, true, nil } - // Check whether there's a syntax section - if s.Header.SectionLength > 0 { - // Parse syntax - if s.Syntax, err = parsePSISectionSyntax(i, s.Header, offsetSectionsEnd); err != nil { - err = fmt.Errorf("astits: parsing PSI section syntax failed: %w", err) - return + // Check whether there's a syntax section. + if s.Header.SectionLength <= 0 { + // Go to the end of the section. + if offsetEnd > r.BitsCount { + skip := make([]byte, (offsetEnd-r.BitsCount)/8) + TryReadFull(r, skip) } + return s, false, nil + } - // Process CRC32 - if s.Header.TableID.hasCRC32() { - // Seek to the end of the sections - i.Seek(offsetSectionsEnd) + if s.Syntax, err = parsePSISectionSyntax(r, s.Header, offsetSectionsEnd); err != nil { + return nil, false, fmt.Errorf("parsing PSI section syntax failed: %w", err) + } - // Parse CRC32 - if s.CRC32, err = parseCRC32(i); err != nil { - err = fmt.Errorf("astits: parsing CRC32 failed: %w", err) - return - } + if s.Header.TableID.hasCRC32() { + computedCRC32 := cr.CRC32() - // Get CRC32 data - i.Seek(offsetStart) - var crc32Data []byte - if crc32Data, err = i.NextBytesNoCopy(offsetSectionsEnd - offsetStart); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + // Go to the end of the sections. + if offsetSectionsEnd > r.BitsCount { + skip := make([]byte, (offsetSectionsEnd-r.BitsCount)/8) + TryReadFull(r, skip) + } - // Compute CRC32 - crc32 := computeCRC32(crc32Data) + if s.CRC32, err = parseCRC32(r); err != nil { + return nil, false, fmt.Errorf("parsing table CRC32 failed: %w", err) + } - // Check CRC32 - if crc32 != s.CRC32 { - err = fmt.Errorf("astits: Table CRC32 %x != computed CRC32 %x", s.CRC32, crc32) - return - } + if computedCRC32 != s.CRC32 { + return nil, false, fmt.Errorf("%w computed=%v table=%v", + ErrPSIInvalidCRC32, computedCRC32, s.CRC32) } } - // Seek to the end of the section - i.Seek(offsetEnd) - return + if offsetEnd > r.BitsCount { + skip := make([]byte, (offsetEnd-r.BitsCount)/8) + TryReadFull(r, skip) + } + + return s, false, r.TryError } -// parseCRC32 parses a CRC32 -func parseCRC32(i *astikit.BytesIterator) (c uint32, err error) { - var bs []byte - if bs, err = i.NextBytesNoCopy(4); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - c = uint32(bs[0])<<24 | uint32(bs[1])<<16 | uint32(bs[2])<<8 | uint32(bs[3]) - return +// parseCRC32 parses a CRC32. +func parseCRC32(r *bitio.CountReader) (uint32, error) { + c := uint32(r.TryReadBits(32)) + return c, r.TryError } -// shouldStopPSIParsing checks whether the PSI parsing should be stopped +// shouldStopPSIParsing checks whether the PSI parsing should be stopped. func shouldStopPSIParsing(tableID PSITableID) bool { return tableID == PSITableIDNull || tableID.isUnknown() } -// parsePSISectionHeader parses a PSI section header -func parsePSISectionHeader(i *astikit.BytesIterator) (h *PSISectionHeader, offsetStart, offsetSectionsStart, offsetSectionsEnd, offsetEnd int, err error) { - // Init +// parsePSISectionHeader parses a PSI section header. 
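// Aside (editorial sketch, not part of the patch): parsePSISection above wraps
// its reader in a CRC32Reader so the checksum is accumulated while the section
// bytes are being read, instead of seeking back over the section and
// recomputing it the way the old BytesIterator code did. The wrapping pattern,
// as it appears above:
//
//	cr := NewCRC32Reader(i)       // i is the incoming *bitio.CountReader
//	r := bitio.NewCountReader(cr) // bit reader that feeds the CRC
//	r.BitsCount = i.BitsCount     // keep absolute bit offsets comparable
//	// ... parse the section through r ...
//	computed := cr.CRC32()        // checksum of everything read so far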
+func parsePSISectionHeader(r *bitio.CountReader) ( + h *PSISectionHeader, + offsetSectionsEnd, + offsetEnd int64, + err error, +) { h = &PSISectionHeader{} - offsetStart = i.Offset() - - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - // Table ID - h.TableID = PSITableID(b) + tableID := r.TryReadByte() + h.TableID = PSITableID(tableID) - // Table type h.TableType = h.TableID.Type() - // Check whether we need to stop the parsing + // Check whether we need to stop the parsing. if shouldStopPSIParsing(h.TableID) { return } - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Section syntax indicator - h.SectionSyntaxIndicator = bs[0]&0x80 > 0 - - // Private bit - h.PrivateBit = bs[0]&0x40 > 0 - - // Section length - h.SectionLength = uint16(bs[0]&0xf)<<8 | uint16(bs[1]) + h.SectionSyntaxIndicator = r.TryReadBool() + h.PrivateBit = r.TryReadBool() + _ = r.TryReadBits(2) // Reserved. + h.SectionLength = uint16(r.TryReadBits(12)) // Offsets - offsetSectionsStart = i.Offset() - offsetEnd = offsetSectionsStart + int(h.SectionLength) + offsetSectionsStart := r.BitsCount + offsetEnd = offsetSectionsStart + int64(h.SectionLength*8) offsetSectionsEnd = offsetEnd if h.TableID.hasCRC32() { - offsetSectionsEnd -= 4 + offsetSectionsEnd -= 4 * 8 } - return + + return h, offsetSectionsEnd, offsetEnd, r.TryError } -// PSITableID.Type() returns the psi table type based on the table id +// Type returns the psi table type based on the table id. // Page: 28 | https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf -// (barbashov) the link above can be broken, alternative: https://dvb.org/wp-content/uploads/2019/12/a038_tm1217r37_en300468v1_17_1_-_rev-134_-_si_specification.pdf func (t PSITableID) Type() string { switch { case t == PSITableIDBAT: @@ -289,7 +308,7 @@ func (t PSITableID) Type() string { } } -// hasPSISyntaxHeader checks whether the section has a syntax header +// hasPSISyntaxHeader checks whether the section has a syntax header. func (t PSITableID) hasPSISyntaxHeader() bool { return t == PSITableIDPAT || t == PSITableIDPMT || @@ -298,7 +317,7 @@ func (t PSITableID) hasPSISyntaxHeader() bool { (t >= PSITableIDEITStart && t <= PSITableIDEITEnd) } -// hasCRC32 checks whether the table has a CRC32 +// hasCRC32 checks whether the table has a CRC32. func (t PSITableID) hasCRC32() bool { return t == PSITableIDPAT || t == PSITableIDPMT || @@ -330,136 +349,106 @@ func (t PSITableID) isUnknown() bool { return true } -// parsePSISectionSyntax parses a PSI section syntax -func parsePSISectionSyntax(i *astikit.BytesIterator, h *PSISectionHeader, offsetSectionsEnd int) (s *PSISectionSyntax, err error) { - // Init - s = &PSISectionSyntax{} +// parsePSISectionSyntax parses a PSI section syntax. 
+func parsePSISectionSyntax( + r *bitio.CountReader, + h *PSISectionHeader, + offsetSectionsEnd int64, +) (*PSISectionSyntax, error) { + s := &PSISectionSyntax{} + var err error - // Header if h.TableID.hasPSISyntaxHeader() { - if s.Header, err = parsePSISectionSyntaxHeader(i); err != nil { - err = fmt.Errorf("astits: parsing PSI section syntax header failed: %w", err) - return + s.Header, err = parsePSISectionSyntaxHeader(r) + if err != nil { + return nil, fmt.Errorf("parsing PSI section syntax header failed: %w", err) } } - // Parse data - if s.Data, err = parsePSISectionSyntaxData(i, h, s.Header, offsetSectionsEnd); err != nil { - err = fmt.Errorf("astits: parsing PSI section syntax data failed: %w", err) - return - } - return -} - -// parsePSISectionSyntaxHeader parses a PSI section syntax header -func parsePSISectionSyntaxHeader(i *astikit.BytesIterator) (h *PSISectionSyntaxHeader, err error) { - // Init - h = &PSISectionSyntaxHeader{} - - // Get next 2 bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Table ID extension - h.TableIDExtension = uint16(bs[0])<<8 | uint16(bs[1]) - - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return + s.Data, err = parsePSISectionSyntaxData(r, h, s.Header, offsetSectionsEnd) + if err != nil { + return nil, fmt.Errorf("parsing PSI section syntax data failed: %w", err) } - // Version number - h.VersionNumber = uint8(b&0x3f) >> 1 + return s, nil +} - // Current/Next indicator - h.CurrentNextIndicator = b&0x1 > 0 +// parsePSISectionSyntaxHeader parses a PSI section syntax header. +func parsePSISectionSyntaxHeader(r *bitio.CountReader) (*PSISectionSyntaxHeader, error) { + h := &PSISectionSyntaxHeader{} - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + h.TableIDExtension = uint16(r.TryReadBits(16)) - // Section number - h.SectionNumber = uint8(b) + _ = r.TryReadBits(2) // Reserved. + h.VersionNumber = uint8(r.TryReadBits(5)) + h.CurrentNextIndicator = r.TryReadBool() - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + h.SectionNumber = r.TryReadByte() - // Last section number - h.LastSectionNumber = uint8(b) - return + h.LastSectionNumber = r.TryReadByte() + return h, r.TryError } -// parsePSISectionSyntaxData parses a PSI section data -func parsePSISectionSyntaxData(i *astikit.BytesIterator, h *PSISectionHeader, sh *PSISectionSyntaxHeader, offsetSectionsEnd int) (d *PSISectionSyntaxData, err error) { - // Init - d = &PSISectionSyntaxData{} - - // Switch on table type +// parsePSISectionSyntaxData parses a PSI section data. +func parsePSISectionSyntaxData( + r *bitio.CountReader, + h *PSISectionHeader, + sh *PSISectionSyntaxHeader, + offsetSectionsEnd int64, +) (*PSISectionSyntaxData, error) { + d := &PSISectionSyntaxData{} + var err error + + // Switch on table type. switch h.TableID { case PSITableIDBAT: - // TODO Parse BAT + // TODO Parse BAT. case PSITableIDDIT: - // TODO Parse DIT + // TODO Parse DIT. 
case PSITableIDNITVariant1, PSITableIDNITVariant2: - if d.NIT, err = parseNITSection(i, sh.TableIDExtension); err != nil { - err = fmt.Errorf("astits: parsing NIT section failed: %w", err) - return + if d.NIT, err = parseNITSection(r, sh.TableIDExtension); err != nil { + return nil, fmt.Errorf("parsing NIT section failed: %w", err) } case PSITableIDPAT: - if d.PAT, err = parsePATSection(i, offsetSectionsEnd, sh.TableIDExtension); err != nil { - err = fmt.Errorf("astits: parsing PAT section failed: %w", err) - return + if d.PAT, err = parsePATSection(r, offsetSectionsEnd, sh.TableIDExtension); err != nil { + return nil, fmt.Errorf("parsing PAT section failed: %w", err) } case PSITableIDPMT: - if d.PMT, err = parsePMTSection(i, offsetSectionsEnd, sh.TableIDExtension); err != nil { - err = fmt.Errorf("astits: parsing PMT section failed: %w", err) - return + if d.PMT, err = parsePMTSection(r, offsetSectionsEnd, sh.TableIDExtension); err != nil { + return nil, fmt.Errorf("parsing PMT section failed: %w", err) } case PSITableIDRST: - // TODO Parse RST + // TODO Parse RST. case PSITableIDSDTVariant1, PSITableIDSDTVariant2: - if d.SDT, err = parseSDTSection(i, offsetSectionsEnd, sh.TableIDExtension); err != nil { - err = fmt.Errorf("astits: parsing PMT section failed: %w", err) - return + if d.SDT, err = parseSDTSection(r, offsetSectionsEnd, sh.TableIDExtension); err != nil { + return nil, fmt.Errorf("parsing PMT section failed: %w", err) } case PSITableIDSIT: - // TODO Parse SIT + // TODO Parse SIT. case PSITableIDST: - // TODO Parse ST + // TODO Parse ST. case PSITableIDTOT: - if d.TOT, err = parseTOTSection(i); err != nil { - err = fmt.Errorf("astits: parsing TOT section failed: %w", err) - return + if d.TOT, err = parseTOTSection(r); err != nil { + return nil, fmt.Errorf("parsing TOT section failed: %w", err) } case PSITableIDTDT: - // TODO Parse TDT + // TODO Parse TDT. } if h.TableID >= PSITableIDEITStart && h.TableID <= PSITableIDEITEnd { - if d.EIT, err = parseEITSection(i, offsetSectionsEnd, sh.TableIDExtension); err != nil { - err = fmt.Errorf("astits: parsing EIT section failed: %w", err) - return + if d.EIT, err = parseEITSection(r, offsetSectionsEnd, sh.TableIDExtension); err != nil { + return nil, fmt.Errorf("parsing EIT section failed: %w", err) } } - return + return d, nil } -// toData parses the PSI tables and returns a set of DemuxerData +// toData parses the PSI tables and returns a set of DemuxerData. func (d *PSIData) toData(firstPacket *Packet, pid uint16) (ds []*DemuxerData) { - // Loop through sections + // Loop through sections. for _, s := range d.Sections { - // Switch on table type + // Switch on table type. 
switch s.Header.TableID { case PSITableIDNITVariant1, PSITableIDNITVariant2: ds = append(ds, &DemuxerData{FirstPacket: firstPacket, NIT: s.Syntax.Data.NIT, PID: pid}) @@ -473,40 +462,44 @@ func (d *PSIData) toData(firstPacket *Packet, pid uint16) (ds []*DemuxerData) { ds = append(ds, &DemuxerData{FirstPacket: firstPacket, PID: pid, TOT: s.Syntax.Data.TOT}) } if s.Header.TableID >= PSITableIDEITStart && s.Header.TableID <= PSITableIDEITEnd { - ds = append(ds, &DemuxerData{EIT: s.Syntax.Data.EIT, FirstPacket: firstPacket, PID: pid}) + data := &DemuxerData{ + EIT: s.Syntax.Data.EIT, + FirstPacket: firstPacket, + PID: pid, + } + ds = append(ds, data) } } return } -func writePSIData(w *astikit.BitsWriter, d *PSIData) (int, error) { - b := astikit.NewBitsWriterBatch(w) - b.Write(uint8(d.PointerField)) +func writePSIData(w *bitio.Writer, d *PSIData) error { + w.TryWriteByte(uint8(d.PointerField)) for i := 0; i < d.PointerField; i++ { - b.Write(uint8(0x00)) + w.TryWriteByte(0x00) } bytesWritten := 1 + d.PointerField - if err := b.Err(); err != nil { - return 0, err + if err := w.TryError; err != nil { + return fmt.Errorf("write: %w", w.TryError) } for _, s := range d.Sections { n, err := writePSISection(w, s) if err != nil { - return 0, err + return fmt.Errorf("writing PSI sections failed: %w", err) } bytesWritten += n } - return bytesWritten, nil + return nil } func calcPSISectionLength(s *PSISection) uint16 { ret := uint16(0) if s.Header.TableID.hasPSISyntaxHeader() { - ret += 5 // PSI syntax header length + ret += 5 // PSI syntax header length. } switch s.Header.TableID { @@ -523,81 +516,83 @@ func calcPSISectionLength(s *PSISection) uint16 { return ret } -func writePSISection(w *astikit.BitsWriter, s *PSISection) (int, error) { +// ErrPSIUnsupportedTable . 
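// writePSISection (just below) computes the table CRC32 on the fly by slotting
// a CRC32Writer between two bitio.Writer layers instead of buffering the whole
// section and hashing it afterwards. A minimal sketch of that wrapping pattern,
// using only names introduced by this patch; the function itself and its
// payload argument are illustrative.
func exampleCRC32OnTheFly(dst io.Writer, payload []byte) error {
	inner := bitio.NewWriter(dst)
	cw := NewCRC32Writer(inner) // Accumulates the CRC32 of every byte written through it.
	w := bitio.NewWriter(cw)    // Bit-level writes now pass through the checksum.

	if _, err := w.Write(payload); err != nil {
		return err
	}
	// payload was written byte-aligned, so it has already reached cw and the
	// running checksum is up to date; append it after the data.
	return w.WriteBits(uint64(cw.CRC32()), 32)
}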
+var ErrPSIUnsupportedTable = errors.New("unsupported table") + +func writePSISection(w *bitio.Writer, s *PSISection) (int, error) { if s.Header.TableID != PSITableIDPAT && s.Header.TableID != PSITableIDPMT { - return 0, fmt.Errorf("writePSISection: table %s is not implemented", s.Header.TableID.Type()) + return 0, fmt.Errorf("%w: %s", ErrPSIUnsupportedTable, s.Header.TableID.Type()) } - b := astikit.NewBitsWriterBatch(w) - sectionLength := calcPSISectionLength(s) - sectionCRC32 := crc32Polynomial + + var cw *CRC32Writer if s.Header.TableID.hasCRC32() { - w.SetWriteCallback(func(bs []byte) { - sectionCRC32 = updateCRC32(sectionCRC32, bs) - }) - defer w.SetWriteCallback(nil) + cw = NewCRC32Writer(w) + w = bitio.NewWriter(cw) } - b.Write(uint8(s.Header.TableID)) - b.Write(s.Header.SectionSyntaxIndicator) - b.Write(s.Header.PrivateBit) - b.WriteN(uint8(0xff), 2) - b.WriteN(sectionLength, 12) + w.TryWriteByte(uint8(s.Header.TableID)) + + w.TryWriteBool(s.Header.SectionSyntaxIndicator) + w.TryWriteBool(s.Header.PrivateBit) + w.TryWriteBits(0xff, 2) + w.TryWriteBits(uint64(sectionLength), 12) bytesWritten := 3 if s.Header.SectionLength > 0 { n, err := writePSISectionSyntax(w, s) if err != nil { - return 0, err + return 0, fmt.Errorf("writing PSI section syntax failed: %w", err) } bytesWritten += n if s.Header.TableID.hasCRC32() { - b.Write(sectionCRC32) + w.TryWriteBits(uint64(cw.CRC32()), 32) bytesWritten += 4 } } - return bytesWritten, b.Err() + return bytesWritten, w.TryError } -func writePSISectionSyntax(w *astikit.BitsWriter, s *PSISection) (int, error) { +func writePSISectionSyntax(w *bitio.Writer, s *PSISection) (int, error) { bytesWritten := 0 if s.Header.TableID.hasPSISyntaxHeader() { n, err := writePSISectionSyntaxHeader(w, s.Syntax.Header) if err != nil { - return 0, err + return 0, fmt.Errorf("header: %w", err) } bytesWritten += n } n, err := writePSISectionSyntaxData(w, s.Syntax.Data, s.Header.TableID) if err != nil { - return 0, err + return 0, fmt.Errorf("data: %w", err) } bytesWritten += n return bytesWritten, nil } -func writePSISectionSyntaxHeader(w *astikit.BitsWriter, h *PSISectionSyntaxHeader) (int, error) { - b := astikit.NewBitsWriterBatch(w) +func writePSISectionSyntaxHeader(w *bitio.Writer, h *PSISectionSyntaxHeader) (int, error) { + w.TryWriteBits(uint64(h.TableIDExtension), 16) + + w.TryWriteBits(0xff, 2) // Reserved. + w.TryWriteBits(uint64(h.VersionNumber), 5) + w.TryWriteBool(h.CurrentNextIndicator) + + w.TryWriteByte(h.SectionNumber) - b.Write(h.TableIDExtension) - b.WriteN(uint8(0xff), 2) - b.WriteN(h.VersionNumber, 5) - b.Write(h.CurrentNextIndicator) - b.Write(h.SectionNumber) - b.Write(h.LastSectionNumber) + w.TryWriteByte(h.LastSectionNumber) - return 5, b.Err() + return 5, w.TryError } -func writePSISectionSyntaxData(w *astikit.BitsWriter, d *PSISectionSyntaxData, tableID PSITableID) (int, error) { +func writePSISectionSyntaxData(w *bitio.Writer, d *PSISectionSyntaxData, tableID PSITableID) (int, error) { switch tableID { - // TODO write other table types + // TODO write other table types. 
case PSITableIDPAT: return writePATSection(w, d.PAT) case PSITableIDPMT: diff --git a/data_psi_test.go b/data_psi_test.go index a452222..2c481b5 100644 --- a/data_psi_test.go +++ b/data_psi_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -100,78 +100,81 @@ var psi = &PSIData{ func psiBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(4)) // Pointer field + w := bitio.NewWriter(buf) + w.WriteByte(4) // Pointer field w.Write([]byte("test")) // Pointer field bytes - w.Write(uint8(78)) // EIT table ID - w.Write("1") // EIT syntax section indicator - w.Write("1") // EIT private bit - w.Write("11") // EIT reserved - w.Write("000000011110") // EIT section length + w.WriteByte(78) // EIT table ID + WriteBinary(w, "1") // EIT syntax section indicator + WriteBinary(w, "1") // EIT private bit + WriteBinary(w, "11") // EIT reserved + WriteBinary(w, "000000011110") // EIT section length w.Write(psiSectionSyntaxHeaderBytes()) // EIT syntax section header w.Write(eitBytes()) // EIT data - w.Write(uint32(0x7ffc6102)) // EIT CRC32 - w.Write(uint8(64)) // NIT table ID - w.Write("1") // NIT syntax section indicator - w.Write("1") // NIT private bit - w.Write("11") // NIT reserved - w.Write("000000011001") // NIT section length + w.WriteBits(0x7ffc6102, 32) // EIT CRC32 + w.WriteByte(64) // NIT table ID + WriteBinary(w, "1") // NIT syntax section indicator + WriteBinary(w, "1") // NIT private bit + WriteBinary(w, "11") // NIT reserved + WriteBinary(w, "000000011001") // NIT section length w.Write(psiSectionSyntaxHeaderBytes()) // NIT syntax section header w.Write(nitBytes()) // NIT data - w.Write(uint32(0xfebaa941)) // NIT CRC32 - w.Write(uint8(0)) // PAT table ID - w.Write("1") // PAT syntax section indicator - w.Write("1") // PAT private bit - w.Write("11") // PAT reserved - w.Write("000000010001") // PAT section length + w.WriteBits(0xfebaa941, 32) // NIT CRC32 + w.WriteByte(0) // PAT table ID + WriteBinary(w, "1") // PAT syntax section indicator + WriteBinary(w, "1") // PAT private bit + WriteBinary(w, "11") // PAT reserved + WriteBinary(w, "000000010001") // PAT section length w.Write(psiSectionSyntaxHeaderBytes()) // PAT syntax section header w.Write(patBytes()) // PAT data - w.Write(uint32(0x60739f61)) // PAT CRC32 - w.Write(uint8(2)) // PMT table ID - w.Write("1") // PMT syntax section indicator - w.Write("1") // PMT private bit - w.Write("11") // PMT reserved - w.Write("000000011000") // PMT section length + w.WriteBits(0x60739f61, 32) // PAT CRC32 + w.WriteByte(2) // PMT table ID + WriteBinary(w, "1") // PMT syntax section indicator + WriteBinary(w, "1") // PMT private bit + WriteBinary(w, "11") // PMT reserved + WriteBinary(w, "000000011000") // PMT section length w.Write(psiSectionSyntaxHeaderBytes()) // PMT syntax section header w.Write(pmtBytes()) // PMT data - w.Write(uint32(0xc68442e8)) // PMT CRC32 - w.Write(uint8(66)) // SDT table ID - w.Write("1") // SDT syntax section indicator - w.Write("1") // SDT private bit - w.Write("11") // SDT reserved - w.Write("000000010100") // SDT section length + w.WriteBits(0xc68442e8, 32) // PMT CRC32 + w.WriteByte(66) // SDT table ID + WriteBinary(w, "1") // SDT syntax section indicator + WriteBinary(w, "1") // SDT private bit + WriteBinary(w, "11") // SDT reserved + WriteBinary(w, "000000010100") // SDT section length w.Write(psiSectionSyntaxHeaderBytes()) // SDT syntax section 
header w.Write(sdtBytes()) // SDT data - w.Write(uint32(0xef3751d6)) // SDT CRC32 - w.Write(uint8(115)) // TOT table ID - w.Write("1") // TOT syntax section indicator - w.Write("1") // TOT private bit - w.Write("11") // TOT reserved - w.Write("000000001110") // TOT section length + w.WriteBits(0xef3751d6, 32) // SDT CRC32 + w.WriteByte(115) // TOT table ID + WriteBinary(w, "1") // TOT syntax section indicator + WriteBinary(w, "1") // TOT private bit + WriteBinary(w, "11") // TOT reserved + WriteBinary(w, "000000001110") // TOT section length w.Write(totBytes()) // TOT data - w.Write(uint32(0x6969b13)) // TOT CRC32 - w.Write(uint8(254)) // Unknown table ID - w.Write(uint8(0)) // PAT table ID + w.WriteBits(0x6969b13, 32) // TOT CRC32 + w.WriteByte(254) // Unknown table ID + w.WriteByte(0) // PAT table ID return buf.Bytes() } func TestParsePSIData(t *testing.T) { // Invalid CRC32 buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(0)) // Pointer field - w.Write(uint8(115)) // TOT table ID - w.Write("1") // TOT syntax section indicator - w.Write("1") // TOT private bit - w.Write("11") // TOT reserved - w.Write("000000001110") // TOT section length - w.Write(totBytes()) // TOT data - w.Write(uint32(32)) // TOT CRC32 - _, err := parsePSIData(astikit.NewBytesIterator(buf.Bytes())) - assert.EqualError(t, err, "astits: parsing PSI table failed: astits: Table CRC32 20 != computed CRC32 6969b13") + w := bitio.NewWriter(buf) + w.WriteByte(0) // Pointer field + w.WriteByte(115) // TOT table ID + WriteBinary(w, "1") // TOT syntax section indicator + WriteBinary(w, "1") // TOT private bit + WriteBinary(w, "11") // TOT reserved + WriteBinary(w, "000000001110") // TOT section length + w.Write(totBytes()) // TOT data + w.WriteBits(32, 32) // TOT CRC32 + + r := bitio.NewCountReader(bytes.NewReader(buf.Bytes())) + _, err := parsePSIData(r) + assert.ErrorIs(t, err, ErrPSIInvalidCRC32) // Valid - d, err := parsePSIData(astikit.NewBytesIterator(psiBytes())) + r = bitio.NewCountReader(bytes.NewReader(psiBytes())) + d, err := parsePSIData(r) assert.NoError(t, err) assert.Equal(t, d, psi) } @@ -186,23 +189,24 @@ var psiSectionHeader = &PSISectionHeader{ func psiSectionHeaderBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(0)) // Table ID - w.Write("1") // Syntax section indicator - w.Write("1") // Private bit - w.Write("11") // Reserved - w.Write("101010101010") // Section length + w := bitio.NewWriter(buf) + w.WriteByte(0) // Table ID + WriteBinary(w, "1") // Syntax section indicator + WriteBinary(w, "1") // Private bit + WriteBinary(w, "11") // Reserved + WriteBinary(w, "101010101010") // Section length return buf.Bytes() } func TestParsePSISectionHeader(t *testing.T) { // Unknown table type buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(254)) // Table ID - w.Write("1") // Syntax section indicator - w.Write("0000000") // Finish the byte - d, _, _, _, _, err := parsePSISectionHeader(astikit.NewBytesIterator(buf.Bytes())) + w := bitio.NewWriter(buf) + w.WriteByte(254) // Table ID + WriteBinary(w, "1") // Syntax section indicator + WriteBinary(w, "0000000") // Finish the byte + r := bitio.NewCountReader(bytes.NewReader(buf.Bytes())) + d, _, _, err := parsePSISectionHeader(r) assert.Equal(t, d, &PSISectionHeader{ TableID: 254, TableType: PSITableTypeUnknown, @@ -210,12 +214,11 @@ func TestParsePSISectionHeader(t *testing.T) { 
assert.NoError(t, err) // Valid table type - d, offsetStart, offsetSectionsStart, offsetSectionsEnd, offsetEnd, err := parsePSISectionHeader(astikit.NewBytesIterator(psiSectionHeaderBytes())) + r = bitio.NewCountReader(bytes.NewReader(psiSectionHeaderBytes())) + d, offsetSectionsEnd, offsetEnd, err := parsePSISectionHeader(r) assert.Equal(t, d, psiSectionHeader) - assert.Equal(t, 0, offsetStart) - assert.Equal(t, 3, offsetSectionsStart) - assert.Equal(t, 2729, offsetSectionsEnd) - assert.Equal(t, 2733, offsetEnd) + assert.Equal(t, int64(2729*8), offsetSectionsEnd) + assert.Equal(t, int64(2733*8), offsetEnd) assert.NoError(t, err) } @@ -251,18 +254,19 @@ var psiSectionSyntaxHeader = &PSISectionSyntaxHeader{ func psiSectionSyntaxHeaderBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint16(1)) // Table ID extension - w.Write("11") // Reserved bits - w.Write("10101") // Version number - w.Write("1") // Current/next indicator - w.Write(uint8(2)) // Section number - w.Write(uint8(3)) // Last section number + w := bitio.NewWriter(buf) + w.WriteBits(1, 16) // Table ID extension + WriteBinary(w, "11") // Reserved bits + WriteBinary(w, "10101") // Version number + WriteBinary(w, "1") // Current/next indicator + w.WriteByte(2) // Section number + w.WriteByte(3) // Last section number return buf.Bytes() } func TestParsePSISectionSyntaxHeader(t *testing.T) { - h, err := parsePSISectionSyntaxHeader(astikit.NewBytesIterator(psiSectionSyntaxHeaderBytes())) + r := bitio.NewCountReader(bytes.NewReader(psiSectionSyntaxHeaderBytes())) + h, err := parsePSISectionSyntaxHeader(r) assert.Equal(t, psiSectionSyntaxHeader, h) assert.NoError(t, err) } @@ -281,24 +285,24 @@ func TestPSIToData(t *testing.T) { type psiDataTestCase struct { name string - bytesFunc func(*astikit.BitsWriter) + bytesFunc func(*bitio.Writer) data *PSIData } var psiDataTestCases = []psiDataTestCase{ { "PAT", - func(w *astikit.BitsWriter) { - w.Write(uint8(4)) // Pointer field + func(w *bitio.Writer) { + w.WriteByte(4) // Pointer field w.Write([]byte{0, 0, 0, 0}) // Pointer field bytes - w.Write(uint8(0)) // PAT table ID - w.Write("1") // PAT syntax section indicator - w.Write("1") // PAT private bit - w.Write("11") // PAT reserved - w.Write("000000010001") // PAT section length + w.WriteByte(0) // PAT table ID + WriteBinary(w, "1") // PAT syntax section indicator + WriteBinary(w, "1") // PAT private bit + WriteBinary(w, "11") // PAT reserved + WriteBinary(w, "000000010001") // PAT section length w.Write(psiSectionSyntaxHeaderBytes()) // PAT syntax section header w.Write(patBytes()) // PAT data - w.Write(uint32(0x60739f61)) // PAT CRC32 + w.WriteBits(0x60739f61, 32) // PAT CRC32 }, &PSIData{ PointerField: 4, @@ -322,17 +326,17 @@ var psiDataTestCases = []psiDataTestCase{ }, { "PMT", - func(w *astikit.BitsWriter) { - w.Write(uint8(4)) // Pointer field + func(w *bitio.Writer) { + w.WriteByte(4) // Pointer field w.Write([]byte{0, 0, 0, 0}) // Pointer field bytes - w.Write(uint8(2)) // PMT table ID - w.Write("1") // PMT syntax section indicator - w.Write("1") // PMT private bit - w.Write("11") // PMT reserved - w.Write("000000011000") // PMT section length + w.WriteByte(2) // PMT table ID + WriteBinary(w, "1") // PMT syntax section indicator + WriteBinary(w, "1") // PMT private bit + WriteBinary(w, "11") // PMT reserved + WriteBinary(w, "000000011000") // PMT section length w.Write(psiSectionSyntaxHeaderBytes()) // PMT syntax section header w.Write(pmtBytes()) // PMT data - 
w.Write(uint32(0xc68442e8)) // PMT CRC32 + w.WriteBits(0xc68442e8, 32) // PMT CRC32 }, &PSIData{ PointerField: 4, @@ -360,17 +364,16 @@ func TestWritePSIData(t *testing.T) { for _, tc := range psiDataTestCases { t.Run(tc.name, func(t *testing.T) { bufExpected := bytes.Buffer{} - wExpected := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufExpected}) + wExpected := bitio.NewWriter(&bufExpected) bufActual := bytes.Buffer{} - wActual := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufActual}) + wActual := bitio.NewWriter(&bufActual) tc.bytesFunc(wExpected) - n, err := writePSIData(wActual, tc.data) + err := writePSIData(wActual, tc.data) assert.NoError(t, err) - assert.Equal(t, bufExpected.Len(), n) - assert.Equal(t, n, bufActual.Len()) - assert.Equal(t, bufExpected.Bytes(), bufActual.Bytes()) + assert.Equal(t, bufActual.Len(), bufExpected.Len()) + assert.Equal(t, bufActual.Bytes(), bufExpected.Bytes()) }) } } @@ -378,6 +381,7 @@ func TestWritePSIData(t *testing.T) { func BenchmarkParsePSIData(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - parsePSIData(astikit.NewBytesIterator(psiBytes())) + r := bitio.NewCountReader(bytes.NewReader(psiBytes())) + parsePSIData(r) } } diff --git a/data_sdt.go b/data_sdt.go index f060f26..e102ba1 100644 --- a/data_sdt.go +++ b/data_sdt.go @@ -3,10 +3,10 @@ package astits import ( "fmt" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// Running statuses +// Running statuses. const ( RunningStatusNotRunning = 1 RunningStatusPausing = 3 @@ -16,93 +16,64 @@ const ( RunningStatusUndefined = 0 ) -// SDTData represents an SDT data +// SDTData represents an SDT data. // Page: 33 | Chapter: 5.2.3 | Link: https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf -// (barbashov) the link above can be broken, alternative: https://dvb.org/wp-content/uploads/2019/12/a038_tm1217r37_en300468v1_17_1_-_rev-134_-_si_specification.pdf type SDTData struct { OriginalNetworkID uint16 Services []*SDTDataService TransportStreamID uint16 } -// SDTDataService represents an SDT data service +// SDTDataService represents an SDT data service. type SDTDataService struct { - Descriptors []*Descriptor - HasEITPresentFollowing bool // When true indicates that EIT present/following information for the service is present in the current TS - HasEITSchedule bool // When true indicates that EIT schedule information for the service is present in the current TS - HasFreeCSAMode bool // When true indicates that access to one or more streams may be controlled by a CA system. - RunningStatus uint8 - ServiceID uint16 -} - -// parseSDTSection parses an SDT section -func parseSDTSection(i *astikit.BytesIterator, offsetSectionsEnd int, tableIDExtension uint16) (d *SDTData, err error) { - // Create data - d = &SDTData{TransportStreamID: tableIDExtension} - - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Original network ID - d.OriginalNetworkID = uint16(bs[0])<<8 | uint16(bs[1]) - - // Reserved for future use - i.Skip(1) + Descriptors []*Descriptor - // Loop until end of section data is reached - for i.Offset() < offsetSectionsEnd { - // Create service - s := &SDTDataService{} + // When true indicates that EIT present/following + // information for the service is present in the current TS. 
+ HasEITPresentFollowing bool - // Get next bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + // When true indicates that EIT schedule information + // for the service is present in the current TS. + HasEITSchedule bool - // Service ID - s.ServiceID = uint16(bs[0])<<8 | uint16(bs[1]) + // When true indicates that access to one or + // more streams may be controlled by a CA system. + HasFreeCSAMode bool + RunningStatus uint8 + ServiceID uint16 +} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } +// parseSDTSection parses an SDT section. +func parseSDTSection( + r *bitio.CountReader, + offsetSectionsEnd int64, + tableIDExtension uint16, +) (*SDTData, error) { + d := &SDTData{TransportStreamID: tableIDExtension} - // EIT schedule flag - s.HasEITSchedule = uint8(b&0x2) > 0 + d.OriginalNetworkID = uint16(r.TryReadBits(16)) - // EIT present/following flag - s.HasEITPresentFollowing = uint8(b&0x1) > 0 + _ = r.TryReadByte() // Reserved. - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + // Loop until end of section data is reached. + for r.BitsCount < offsetSectionsEnd { + s := &SDTDataService{} - // Running status - s.RunningStatus = uint8(b) >> 5 + s.ServiceID = uint16(r.TryReadBits(16)) - // Free CA mode - s.HasFreeCSAMode = uint8(b&0x10) > 0 + _ = r.TryReadBits(6) // Reserved. + s.HasEITSchedule = r.TryReadBool() + s.HasEITPresentFollowing = r.TryReadBool() - // We need to rewind since the current byte is used by the descriptor as well - i.Skip(-1) + s.RunningStatus = uint8(r.TryReadBits(3)) + s.HasFreeCSAMode = r.TryReadBool() - // Descriptors - if s.Descriptors, err = parseDescriptors(i); err != nil { - err = fmt.Errorf("astits: parsing descriptors failed: %w", err) - return + var err error + if s.Descriptors, err = parseDescriptors(r); err != nil { + return nil, fmt.Errorf("parsing descriptors failed: %w", err) } - // Append service d.Services = append(d.Services, s) } - return + return d, r.TryError } diff --git a/data_sdt_test.go b/data_sdt_test.go index cc59c9d..7d96d32 100644 --- a/data_sdt_test.go +++ b/data_sdt_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -23,22 +23,23 @@ var sdt = &SDTData{ func sdtBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint16(2)) // Original network ID - w.Write(uint8(0)) // Reserved for future use - w.Write(uint16(3)) // Service #1 id - w.Write("000000") // Service #1 reserved for future use - w.Write("1") // Service #1 EIT schedule flag - w.Write("1") // Service #1 EIT present/following flag - w.Write("101") // Service #1 running status - w.Write("1") // Service #1 free CA mode - descriptorsBytes(w) // Service #1 descriptors + w := bitio.NewWriter(buf) + w.WriteBits(uint64(2), 16) // Original network ID + w.WriteByte(uint8(0)) // Reserved for future use + w.WriteBits(uint64(3), 16) // Service #1 id + WriteBinary(w, "000000") // Service #1 reserved for future use + WriteBinary(w, "1") // Service #1 EIT schedule flag + WriteBinary(w, "1") // Service #1 EIT present/following flag + WriteBinary(w, "101") // Service #1 running status + WriteBinary(w, "1") // Service #1 free CA mode + descriptorsBytes(w) // 
Service #1 descriptors return buf.Bytes() } func TestParseSDTSection(t *testing.T) { - var b = sdtBytes() - d, err := parseSDTSection(astikit.NewBytesIterator(b), len(b), uint16(1)) + b := sdtBytes() + r := bitio.NewCountReader(bytes.NewReader(b)) + d, err := parseSDTSection(r, int64(len(b)*8), uint16(1)) assert.Equal(t, d, sdt) assert.NoError(t, err) } diff --git a/data_test.go b/data_test.go index 72e0ea0..1dbb25f 100644 --- a/data_test.go +++ b/data_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -15,7 +15,7 @@ func TestParseData(t *testing.T) { // Custom parser cds := []*DemuxerData{{PID: 1}} - var c = func(ps []*Packet) (o []*DemuxerData, skip bool, err error) { + c := func(ps []*Packet) (o []*DemuxerData, skip bool, err error) { o = cds skip = true return @@ -79,10 +79,10 @@ func TestIsPSIPayload(t *testing.T) { func TestIsPESPayload(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write("0000000000000001") + w := bitio.NewWriter(buf) + WriteBinary(w, "0000000000000001") assert.False(t, isPESPayload(buf.Bytes())) buf.Reset() - w.Write("000000000000000000000001") + WriteBinary(w, "000000000000000000000001") assert.True(t, isPESPayload(buf.Bytes())) } diff --git a/data_tot.go b/data_tot.go index 0bd64d2..5ecf104 100644 --- a/data_tot.go +++ b/data_tot.go @@ -4,32 +4,33 @@ import ( "fmt" "time" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// TOTData represents a TOT data -// Page: 39 | Chapter: 5.2.6 | Link: https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf -// (barbashov) the link above can be broken, alternative: https://dvb.org/wp-content/uploads/2019/12/a038_tm1217r37_en300468v1_17_1_-_rev-134_-_si_specification.pdf +// TOTData represents a TOT data. +// Page: 39 | Chapter: 5.2.6 | Link: +// https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf type TOTData struct { Descriptors []*Descriptor UTCTime time.Time } -// parseTOTSection parses a TOT section -func parseTOTSection(i *astikit.BytesIterator) (d *TOTData, err error) { - // Create data - d = &TOTData{} +// parseTOTSection parses a TOT section. 
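// The parsers in this change lean on bitio's Try* API: the first failure is
// recorded in TryError and later Try* calls become no-ops, so a parser can run
// a straight-line sequence of reads and check r.TryError once at the end, as
// parseSDTSection above and parseTOTSection below do. A minimal sketch of the
// pattern on a made-up 3-byte record (the record layout is purely illustrative):
type exampleRecord struct {
	ID       uint16
	Priority uint8 // 3 bits.
	Flag     bool
}

func parseExampleRecord(r *bitio.CountReader) (*exampleRecord, error) {
	rec := &exampleRecord{}
	rec.ID = uint16(r.TryReadBits(16))
	rec.Priority = uint8(r.TryReadBits(3))
	rec.Flag = r.TryReadBool()
	_ = r.TryReadBits(4) // Reserved padding up to the byte boundary.
	return rec, r.TryError
}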
+func parseTOTSection(r *bitio.CountReader) (*TOTData, error) { + d := &TOTData{} - // UTC time - if d.UTCTime, err = parseDVBTime(i); err != nil { - err = fmt.Errorf("astits: parsing DVB time failed: %w", err) - return + var err error + if d.UTCTime, err = parseDVBTime(r); err != nil { + return nil, fmt.Errorf("parsing DVB time failed: %w", err) + } + + if _, err = r.ReadBits(4); err != nil { + return nil, fmt.Errorf("read: %w", err) } // Descriptors - if d.Descriptors, err = parseDescriptors(i); err != nil { - err = fmt.Errorf("astits: parsing descriptors failed: %w", err) - return + if d.Descriptors, err = parseDescriptors(r); err != nil { + return nil, fmt.Errorf("parsing descriptors failed: %w", err) } - return + return d, nil } diff --git a/data_tot_test.go b/data_tot_test.go index 0f2756c..050bcb9 100644 --- a/data_tot_test.go +++ b/data_tot_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -15,15 +15,16 @@ var tot = &TOTData{ func totBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(dvbTimeBytes) // UTC time - w.Write("0000") // Reserved - descriptorsBytes(w) // Service #1 descriptors + w := bitio.NewWriter(buf) + w.Write(dvbTimeBytes) // UTC time. + WriteBinary(w, "0000") // Reserved. + descriptorsBytes(w) // Service #1 descriptors. return buf.Bytes() } func TestParseTOTSection(t *testing.T) { - d, err := parseTOTSection(astikit.NewBytesIterator(totBytes())) + r := bitio.NewCountReader(bytes.NewReader(totBytes())) + d, err := parseTOTSection(r) assert.Equal(t, d, tot) assert.NoError(t, err) } diff --git a/demuxer.go b/demuxer.go index 5c6dd50..2d86b5c 100644 --- a/demuxer.go +++ b/demuxer.go @@ -7,16 +7,13 @@ import ( "io" ) -// Sync byte +// Sync byte. const syncByte = '\x47' -// Errors -var ( - ErrNoMorePackets = errors.New("astits: no more packets") - ErrPacketMustStartWithASyncByte = errors.New("astits: packet must start with a sync byte") -) +// ErrPacketStartSyncByte packet must start with a sync byte. +var ErrPacketStartSyncByte = errors.New("packet must start with a sync byte") -// Demuxer represents a demuxer +// Demuxer represents a demuxer. // https://en.wikipedia.org/wiki/MPEG_transport_stream // http://seidl.cs.vsb.cz/download/dvb/DVB_Poster.pdf // http://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.13.01_40/en_300468v011301o.pdf @@ -31,11 +28,14 @@ type Demuxer struct { r io.Reader } -// PacketsParser represents an object capable of parsing a set of packets containing a unique payload spanning over those packets -// Use the skip returned argument to indicate whether the default process should still be executed on the set of packets +// PacketsParser represents an object capable of parsing +// a set of packets containing a unique payload spanning +// over those packets. Use the skip returned argument +// to indicate whether the default process should +// still be executed on the set of packets. type PacketsParser func(ps []*Packet) (ds []*DemuxerData, skip bool, err error) -// NewDemuxer creates a new transport stream based on a reader +// NewDemuxer creates a new transport stream based on a reader. 
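// With ErrNoMorePackets removed, end of stream now surfaces as io.EOF (see the
// NextPacket and NextData changes below and the updated tests). A caller loop
// under this patch would look roughly like the sketch below; the io.Reader and
// the 188-byte packet size are assumptions made for the example.
func demuxAll(ctx context.Context, r io.Reader) error {
	dmx := NewDemuxer(ctx, r, DemuxerOptPacketSize(188))
	for {
		d, err := dmx.NextData()
		if errors.Is(err, io.EOF) {
			return nil // End of stream.
		}
		if err != nil {
			return err
		}
		_ = d // Inspect d.PAT, d.PMT, d.EIT, ... as needed.
	}
}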
func NewDemuxer(ctx context.Context, r io.Reader, opts ...func(*Demuxer)) (d *Demuxer) { // Init d = &Demuxer{ @@ -53,121 +53,118 @@ func NewDemuxer(ctx context.Context, r io.Reader, opts ...func(*Demuxer)) (d *De return } -// DemuxerOptPacketSize returns the option to set the packet size +// DemuxerOptPacketSize returns the option to set the packet size. func DemuxerOptPacketSize(packetSize int) func(*Demuxer) { return func(d *Demuxer) { d.optPacketSize = packetSize } } -// DemuxerOptPacketsParser returns the option to set the packets parser +// DemuxerOptPacketsParser returns the option to set the packets parser. func DemuxerOptPacketsParser(p PacketsParser) func(*Demuxer) { return func(d *Demuxer) { d.optPacketsParser = p } } -// NextPacket retrieves the next packet -func (dmx *Demuxer) NextPacket() (p *Packet, err error) { +// NextPacket retrieves the next packet. +func (dmx *Demuxer) NextPacket() (*Packet, error) { // Check ctx error - // TODO Handle ctx error another way since if the read blocks, everything blocks - // Maybe execute everything in a goroutine and listen the ctx channel in the same for loop + // TODO Handle ctx error another way since if the read blocks, + // everything blocks Maybe execute everything in a goroutine + // and listen the ctx channel in the same for loop. + var err error if err = dmx.ctx.Err(); err != nil { - return + return nil, fmt.Errorf("context error: %w", err) } - // Create packet buffer if not exists + // Create packet buffer if not exists. if dmx.packetBuffer == nil { - if dmx.packetBuffer, err = newPacketBuffer(dmx.r, dmx.optPacketSize); err != nil { - err = fmt.Errorf("astits: creating packet buffer failed: %w", err) - return + dmx.packetBuffer, err = newPacketBuffer(dmx.r, dmx.optPacketSize) + if err != nil { + return nil, fmt.Errorf("creating packet buffer failed: %w", err) } } - // Fetch next packet from buffer - if p, err = dmx.packetBuffer.next(); err != nil { - if err != ErrNoMorePackets { - err = fmt.Errorf("astits: fetching next packet from buffer failed: %w", err) + // Fetch next packet from buffer. + p, err := dmx.packetBuffer.next() + if err != nil { + if errors.Is(err, io.EOF) { + return nil, io.EOF } - return + return nil, fmt.Errorf("fetching next packet from buffer failed: %w", err) } - return + + return p, nil } -// NextData retrieves the next data -func (dmx *Demuxer) NextData() (d *DemuxerData, err error) { - // Check data buffer +// NextData retrieves the next data. +func (dmx *Demuxer) NextData() (*DemuxerData, error) { + // Check data buffer. if len(dmx.dataBuffer) > 0 { - d = dmx.dataBuffer[0] + d := dmx.dataBuffer[0] dmx.dataBuffer = dmx.dataBuffer[1:] - return + return d, nil } - // Loop through packets + // Loop through packets. var p *Packet + var err error var ps []*Packet var ds []*DemuxerData for { - // Get next packet + // Get next packet. if p, err = dmx.NextPacket(); err != nil { - // If the end of the stream has been reached, we dump the packet pool - if err == ErrNoMorePackets { - for { - // Dump packet pool - if ps = dmx.packetPool.dump(); len(ps) == 0 { - break - } + if !errors.Is(err, io.EOF) { + return nil, fmt.Errorf("fetching next packet failed: %w", err) + } + // If the end of the stream has been reached, we dump the packet pool. 
+ for { + if ps = dmx.packetPool.dump(); len(ps) == 0 { + break + } - // Parse data - var errParseData error - if ds, errParseData = parseData(ps, dmx.optPacketsParser, dmx.programMap); errParseData != nil { - // We need to silence this error as there may be some incomplete data here - // We still want to try to parse all packets, in case final data is complete - continue - } + var errParseData error + if ds, errParseData = parseData(ps, dmx.optPacketsParser, dmx.programMap); errParseData != nil { + // We need to silence this error as there may be some + // incomplete data here We still want to try to + // parse all packets, in case final data is complete. + continue + } - // Update data - if d = dmx.updateData(ds); d != nil { - err = nil - return - } + if d := dmx.updateData(ds); d != nil { + return d, nil } - return } - err = fmt.Errorf("astits: fetching next packet failed: %w", err) - return + return nil, err } - // Add packet to the pool if ps = dmx.packetPool.add(p); len(ps) == 0 { continue } - // Parse data if ds, err = parseData(ps, dmx.optPacketsParser, dmx.programMap); err != nil { - err = fmt.Errorf("astits: building new data failed: %w", err) - return + return nil, fmt.Errorf("building new data failed: %w", err) } - // Update data - if d = dmx.updateData(ds); d != nil { - return + if d := dmx.updateData(ds); d != nil { + return d, nil } } } func (dmx *Demuxer) updateData(ds []*DemuxerData) (d *DemuxerData) { - // Check whether there is data to be processed + // Check whether there is data to be processed. if len(ds) > 0 { - // Process data + // Process data. d = ds[0] dmx.dataBuffer = append(dmx.dataBuffer, ds[1:]...) - // Update program map + // Update program map. for _, v := range ds { if v.PAT != nil { for _, pgm := range v.PAT.Programs { - // Program number 0 is reserved to NIT + // Program number 0 is reserved to NIT. if pgm.ProgramNumber > 0 { dmx.programMap.set(pgm.ProgramMapID, pgm.ProgramNumber) } @@ -178,13 +175,13 @@ func (dmx *Demuxer) updateData(ds []*DemuxerData) (d *DemuxerData) { return } -// Rewind rewinds the demuxer reader +// Rewind rewinds the demuxer reader. 
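// NextPacket (above) returns io.EOF untouched at end of stream and wraps other
// failures with %w, so sentinel values such as ErrPacketStartSyncByte should
// remain detectable with errors.Is — assuming the packet buffer surfaces that
// sentinel wrapped, which this hunk does not show. A hedged caller-side sketch;
// the function name and its reader argument are illustrative.
func countPackets(ctx context.Context, r io.Reader) (int, error) {
	dmx := NewDemuxer(ctx, r)
	n := 0
	for {
		_, err := dmx.NextPacket()
		switch {
		case err == nil:
			n++
		case errors.Is(err, io.EOF):
			return n, nil
		case errors.Is(err, ErrPacketStartSyncByte):
			return n, fmt.Errorf("input is not aligned on TS packets: %w", err)
		default:
			return n, err
		}
	}
}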
func (dmx *Demuxer) Rewind() (n int64, err error) { dmx.dataBuffer = []*DemuxerData{} dmx.packetBuffer = nil dmx.packetPool = newPacketPool(dmx.optPacketsParser, dmx.programMap) if n, err = rewind(dmx.r); err != nil { - err = fmt.Errorf("astits: rewinding reader failed: %w", err) + err = fmt.Errorf("rewinding reader failed: %w", err) return } return diff --git a/demuxer_test.go b/demuxer_test.go index 8fe9ecb..a752fe7 100644 --- a/demuxer_test.go +++ b/demuxer_test.go @@ -10,7 +10,7 @@ import ( "testing" "unicode" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -46,7 +46,7 @@ func TestDemuxerNextPacket(t *testing.T) { // Valid buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) b1, p1 := packet(*packetHeader, *packetAdaptationField, []byte("1"), true) w.Write(b1) b2, p2 := packet(*packetHeader, *packetAdaptationField, []byte("2"), true) @@ -66,13 +66,13 @@ func TestDemuxerNextPacket(t *testing.T) { // EOF _, err = dmx.NextPacket() - assert.EqualError(t, err, ErrNoMorePackets.Error()) + assert.ErrorIs(t, err, io.EOF) } func TestDemuxerNextData(t *testing.T) { // Init buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) b := psiBytes() b1, _ := packet(PacketHeader{ContinuityCounter: uint8(0), PayloadUnitStartIndicator: true, PID: PIDPAT}, PacketAdaptationField{}, b[:147], true) w.Write(b1) @@ -98,12 +98,12 @@ func TestDemuxerNextData(t *testing.T) { // No more packets _, err = dmx.NextData() - assert.EqualError(t, err, ErrNoMorePackets.Error()) + assert.ErrorIs(t, err, io.EOF) } func TestDemuxerNextDataUnknownDataPackets(t *testing.T) { buf := &bytes.Buffer{} - bufWriter := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + bufWriter := bitio.NewWriter(buf) // Packet that isn't a data packet (PSI or PES) b1, _ := packet(PacketHeader{ @@ -119,7 +119,7 @@ func TestDemuxerNextDataUnknownDataPackets(t *testing.T) { DemuxerOptPacketSize(188)) d, err := dmx.NextData() assert.Equal(t, (*DemuxerData)(nil), d) - assert.EqualError(t, err, ErrNoMorePackets.Error()) + assert.ErrorIs(t, err, io.EOF) } func TestDemuxerNextDataPATPMT(t *testing.T) { @@ -174,7 +174,7 @@ func BenchmarkDemuxer_NextData(b *testing.B) { b.ReportAllocs() buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) bs := psiBytes() b1, _ := packet(PacketHeader{ContinuityCounter: uint8(0), PayloadUnitStartIndicator: true, PID: PIDPAT}, PacketAdaptationField{}, bs[:147], true) w.Write(b1) diff --git a/descriptor.go b/descriptor.go index 943604a..d756f5f 100644 --- a/descriptor.go +++ b/descriptor.go @@ -4,19 +4,19 @@ import ( "fmt" "time" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// Audio types -// Page: 683 | https://books.google.fr/books?id=6dgWB3-rChYC&printsec=frontcover&hl=fr +// Audio types. Page: 683 | Link: +// https://books.google.fr/books?id=6dgWB3-rChYC&printsec=frontcover&hl=fr const ( AudioTypeCleanEffects = 0x1 AudioTypeHearingImpaired = 0x2 AudioTypeVisualImpairedCommentary = 0x3 ) -// Data stream alignments -// Page: 85 | Chapter:2.6.11 | Link: http://ecee.colorado.edu/~ecen5653/ecen5653/papers/iso13818-1.pdf +// Data stream alignments. 
Page: 85 | Chapter:2.6.11 | Link: +// http://ecee.colorado.edu/~ecen5653/ecen5653/papers/iso13818-1.pdf const ( DataStreamAligmentAudioSyncWord = 0x1 DataStreamAligmentVideoSliceOrAccessUnit = 0x1 @@ -25,8 +25,8 @@ const ( DataStreamAligmentVideoSEQ = 0x4 ) -// Descriptor tags -// Chapter: 6.1 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// Descriptor tags. Chapter: 6.1 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf const ( DescriptorTagAC3 = 0x6a DescriptorTagAVCVideo = 0x28 @@ -53,20 +53,20 @@ const ( DescriptorTagVBITeletext = 0x46 ) -// Descriptor extension tags -// Chapter: 6.3 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// Descriptor extension tags. Chapter: 6.3 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf const ( DescriptorTagExtensionSupplementaryAudio = 0x6 ) -// Service types -// Chapter: 6.2.33 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// Service types. Chapter: 6.2.33 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf const ( ServiceTypeDigitalTelevisionService = 0x1 ) -// Teletext types -// Chapter: 6.2.43 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// Teletext types. Chapter: 6.2.43 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf const ( TeletextTypeAdditionalInformationPage = 0x3 TeletextTypeInitialTeletextPage = 0x1 @@ -75,8 +75,8 @@ const ( TeletextTypeTeletextSubtitlePageForHearingImpairedPeople = 0x5 ) -// VBI data service id -// Chapter: 6.2.47 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// VBI data service id Chapter: 6.2.47 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf const ( VBIDataServiceIDClosedCaptioning = 0x6 VBIDataServiceIDEBUTeletext = 0x1 @@ -87,44 +87,47 @@ const ( ) // Descriptor represents a descriptor -// TODO Handle UTF8 +// TODO Handle UTF8. type Descriptor struct { AC3 *DescriptorAC3 AVCVideo *DescriptorAVCVideo Component *DescriptorComponent - Content *DescriptorContent - DataStreamAlignment *DescriptorDataStreamAlignment + Content DescriptorContent + DataStreamAlignment DescriptorDataStreamAlignment EnhancedAC3 *DescriptorEnhancedAC3 ExtendedEvent *DescriptorExtendedEvent Extension *DescriptorExtension ISO639LanguageAndAudioType *DescriptorISO639LanguageAndAudioType Length uint8 - LocalTimeOffset *DescriptorLocalTimeOffset - MaximumBitrate *DescriptorMaximumBitrate - NetworkName *DescriptorNetworkName - ParentalRating *DescriptorParentalRating - PrivateDataIndicator *DescriptorPrivateDataIndicator - PrivateDataSpecifier *DescriptorPrivateDataSpecifier + LocalTimeOffset DescriptorLocalTimeOffset + MaximumBitrate DescriptorMaximumBitrate + NetworkName DescriptorNetworkName + ParentalRating DescriptorParentalRating + PrivateDataIndicator DescriptorPrivateDataIndicator + PrivateDataSpecifier DescriptorPrivateDataSpecifier Registration *DescriptorRegistration Service *DescriptorService ShortEvent *DescriptorShortEvent - StreamIdentifier *DescriptorStreamIdentifier - Subtitling *DescriptorSubtitling - Tag uint8 // the tag defines the structure of the contained data following the descriptor length. 
- Teletext *DescriptorTeletext - Unknown *DescriptorUnknown - UserDefined []byte - VBIData *DescriptorVBIData - VBITeletext *DescriptorTeletext -} - -// DescriptorAC3 represents an AC3 descriptor -// Chapter: Annex D | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf + StreamIdentifier DescriptorStreamIdentifier + Subtitling DescriptorSubtitling + + // the tag defines the structure of the contained + // data following the descriptor length. + Tag uint8 + Teletext DescriptorTeletext + Unknown *DescriptorUnknown + UserDefined []byte + VBIData DescriptorVBIData + VBITeletext DescriptorTeletext +} + +// DescriptorAC3 represents an AC3 descriptor Chapter: Annex D | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorAC3 struct { AdditionalInfo []byte ASVC uint8 BSID uint8 - ComponentType uint8 + ComponentType uint8 // 4 Bits. HasASVC bool HasBSID bool HasComponentType bool @@ -132,74 +135,46 @@ type DescriptorAC3 struct { MainID uint8 } -func newDescriptorAC3(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorAC3, err error) { - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return +func newDescriptorAC3(r *bitio.CountReader, offsetEnd int64) (*DescriptorAC3, error) { + d := &DescriptorAC3{ + HasASVC: r.TryReadBool(), + HasBSID: r.TryReadBool(), + HasComponentType: r.TryReadBool(), + HasMainID: r.TryReadBool(), } + _ = r.TryReadBits(4) // Reserved. - // Create descriptor - d = &DescriptorAC3{ - HasASVC: uint8(b&0x10) > 0, - HasBSID: uint8(b&0x40) > 0, - HasComponentType: uint8(b&0x80) > 0, - HasMainID: uint8(b&0x20) > 0, - } - - // Component type if d.HasComponentType { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.ComponentType = uint8(b) + d.ComponentType = r.TryReadByte() } - // BSID if d.HasBSID { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.BSID = uint8(b) + d.BSID = r.TryReadByte() } - // Main ID if d.HasMainID { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.MainID = uint8(b) + d.MainID = r.TryReadByte() } - // ASVC if d.HasASVC { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.ASVC = uint8(b) + d.ASVC = r.TryReadByte() } - // Additional info - if i.Offset() < offsetEnd { - if d.AdditionalInfo, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + if r.BitsCount/8 < offsetEnd { + d.AdditionalInfo = make([]byte, offsetEnd-r.BitsCount/8) + TryReadFull(r, d.AdditionalInfo) } - return + + return d, r.TryError } -// DescriptorAVCVideo represents an AVC video descriptor -// No doc found unfortunately, basing the implementation on https://github.com/gfto/bitstream/blob/master/mpeg/psi/desc_28.h +// DescriptorAVCVideo represents an AVC video descriptor. +// No doc found unfortunately, basing the implementation on +// https://github.com/gfto/bitstream/blob/master/mpeg/psi/desc_28.h type DescriptorAVCVideo struct { AVC24HourPictureFlag bool AVCStillPresent bool - CompatibleFlags uint8 + CompatibleFlags uint8 // 5 bits. 
ConstraintSet0Flag bool ConstraintSet1Flag bool ConstraintSet2Flag bool @@ -207,176 +182,96 @@ type DescriptorAVCVideo struct { ProfileIDC uint8 } -func newDescriptorAVCVideo(i *astikit.BytesIterator) (d *DescriptorAVCVideo, err error) { - // Init - d = &DescriptorAVCVideo{} +func newDescriptorAVCVideo(r *bitio.CountReader) (*DescriptorAVCVideo, error) { + d := &DescriptorAVCVideo{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + d.ProfileIDC = r.TryReadByte() - // Profile idc - d.ProfileIDC = uint8(b) + d.ConstraintSet0Flag = r.TryReadBool() + d.ConstraintSet1Flag = r.TryReadBool() + d.ConstraintSet2Flag = r.TryReadBool() + d.CompatibleFlags = uint8(r.TryReadBits(5)) - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + d.LevelIDC = r.TryReadByte() - // Flags - d.ConstraintSet0Flag = b&0x80 > 0 - d.ConstraintSet1Flag = b&0x40 > 0 - d.ConstraintSet2Flag = b&0x20 > 0 - d.CompatibleFlags = b & 0x1f + d.AVCStillPresent = r.TryReadBool() + d.AVC24HourPictureFlag = r.TryReadBool() + // Reserved. + _ = r.TryReadBits(6) - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Level idc - d.LevelIDC = uint8(b) - - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // AVC still present - d.AVCStillPresent = b&0x80 > 0 - - // AVC 24 hour picture flag - d.AVC24HourPictureFlag = b&0x40 > 0 - return + return d, r.TryError } -// DescriptorComponent represents a component descriptor -// Chapter: 6.2.8 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorComponent represents a component descriptor Chapter: 6.2.8 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorComponent struct { - ComponentTag uint8 + StreamContentExt uint8 // 4 bits. + StreamContent uint8 // 4 bits. ComponentType uint8 - ISO639LanguageCode []byte - StreamContent uint8 - StreamContentExt uint8 + ComponentTag uint8 + ISO639LanguageCode []byte // 3 bytes. 
Text []byte } -func newDescriptorComponent(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorComponent, err error) { - // Init - d = &DescriptorComponent{} +func newDescriptorComponent(r *bitio.CountReader, offsetEnd int64) (*DescriptorComponent, error) { + d := &DescriptorComponent{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + d.StreamContentExt = uint8(r.TryReadBits(4)) + d.StreamContent = uint8(r.TryReadBits(4)) - // Stream content ext - d.StreamContentExt = uint8(b >> 4) + d.ComponentType = r.TryReadByte() + d.ComponentTag = r.TryReadByte() - // Stream content - d.StreamContent = uint8(b & 0xf) + d.ISO639LanguageCode = make([]byte, 3) + TryReadFull(r, d.ISO639LanguageCode) - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return + if r.BitsCount/8 < offsetEnd { + d.Text = make([]byte, offsetEnd-r.BitsCount/8) + TryReadFull(r, d.Text) } - // Component type - d.ComponentType = uint8(b) - - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Component tag - d.ComponentTag = uint8(b) - - // ISO639 language code - if d.ISO639LanguageCode, err = i.NextBytes(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Text - if i.Offset() < offsetEnd { - if d.Text, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - } - return + return d, r.TryError } -// DescriptorContent represents a content descriptor -// Chapter: 6.2.9 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorContent represents a content descriptor. Chapter: 6.2.9 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorContent struct { Items []*DescriptorContentItem } -// DescriptorContentItem represents a content item descriptor -// Chapter: 6.2.9 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorContentItem represents a content item descriptor. Chapter: 6.2.9 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorContentItem struct { - ContentNibbleLevel1 uint8 - ContentNibbleLevel2 uint8 + ContentNibbleLevel1 uint8 // 4 bits. + ContentNibbleLevel2 uint8 // 4 bits. 
UserByte uint8 } -func newDescriptorContent(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorContent, err error) { - // Init - d = &DescriptorContent{} +func newDescriptorContent(r *bitio.CountReader, offsetEnd int64) (DescriptorContent, error) { + items := []*DescriptorContentItem{} - // Add items - for i.Offset() < offsetEnd { - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Append item - d.Items = append(d.Items, &DescriptorContentItem{ - ContentNibbleLevel1: uint8(bs[0] >> 4), - ContentNibbleLevel2: uint8(bs[0] & 0xf), - UserByte: uint8(bs[1]), + for r.BitsCount/8 < offsetEnd { + items = append(items, &DescriptorContentItem{ + ContentNibbleLevel1: uint8(r.TryReadBits(4)), + ContentNibbleLevel2: uint8(r.TryReadBits(4)), + UserByte: r.TryReadByte(), }) } - return -} -// DescriptorDataStreamAlignment represents a data stream alignment descriptor -type DescriptorDataStreamAlignment struct { - Type uint8 + return DescriptorContent{Items: items}, r.TryError } -func newDescriptorDataStreamAlignment(i *astikit.BytesIterator) (d *DescriptorDataStreamAlignment, err error) { - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d = &DescriptorDataStreamAlignment{Type: uint8(b)} - return +// DescriptorDataStreamAlignment represents a data stream alignment descriptor. +type DescriptorDataStreamAlignment uint8 + +func newDescriptorDataStreamAlignment(r *bitio.CountReader) (DescriptorDataStreamAlignment, error) { + typ, err := r.ReadByte() + return DescriptorDataStreamAlignment(typ), err } -// DescriptorEnhancedAC3 represents an enhanced AC3 descriptor -// Chapter: Annex D | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorEnhancedAC3 represents an enhanced AC3 descriptor. 
Chapter: Annex D | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorEnhancedAC3 struct { - AdditionalInfo []byte - ASVC uint8 - BSID uint8 - ComponentType uint8 HasASVC bool HasBSID bool HasComponentType bool @@ -384,457 +279,298 @@ type DescriptorEnhancedAC3 struct { HasSubStream1 bool HasSubStream2 bool HasSubStream3 bool - MainID uint8 MixInfoExists bool + ComponentType uint8 + BSID uint8 + MainID uint8 + ASVC uint8 SubStream1 uint8 SubStream2 uint8 SubStream3 uint8 + AdditionalInfo []byte } -func newDescriptorEnhancedAC3(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorEnhancedAC3, err error) { - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Create descriptor - d = &DescriptorEnhancedAC3{ - HasASVC: uint8(b&0x10) > 0, - HasBSID: uint8(b&0x40) > 0, - HasComponentType: uint8(b&0x80) > 0, - HasMainID: uint8(b&0x20) > 0, - HasSubStream1: uint8(b&0x4) > 0, - HasSubStream2: uint8(b&0x2) > 0, - HasSubStream3: uint8(b&0x1) > 0, - MixInfoExists: uint8(b&0x8) > 0, +func newDescriptorEnhancedAC3(r *bitio.CountReader, offsetEnd int64) (*DescriptorEnhancedAC3, error) { + d := &DescriptorEnhancedAC3{ + HasASVC: r.TryReadBool(), + HasBSID: r.TryReadBool(), + HasComponentType: r.TryReadBool(), + HasMainID: r.TryReadBool(), + HasSubStream1: r.TryReadBool(), + HasSubStream2: r.TryReadBool(), + HasSubStream3: r.TryReadBool(), + MixInfoExists: r.TryReadBool(), } - // Component type if d.HasComponentType { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.ComponentType = uint8(b) + d.ComponentType = r.TryReadByte() } - - // BSID if d.HasBSID { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.BSID = uint8(b) + d.BSID = r.TryReadByte() } - - // Main ID if d.HasMainID { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.MainID = uint8(b) + d.MainID = r.TryReadByte() } - - // ASVC if d.HasASVC { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.ASVC = uint8(b) + d.ASVC = r.TryReadByte() } - - // Substream 1 if d.HasSubStream1 { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.SubStream1 = uint8(b) + d.SubStream1 = r.TryReadByte() } - - // Substream 2 if d.HasSubStream2 { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.SubStream2 = uint8(b) + d.SubStream2 = r.TryReadByte() } - - // Substream 3 if d.HasSubStream3 { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d.SubStream3 = uint8(b) + d.SubStream3 = r.TryReadByte() } - // Additional info - if i.Offset() < offsetEnd { - if d.AdditionalInfo, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + if r.BitsCount/8 < offsetEnd { + d.AdditionalInfo = make([]byte, offsetEnd-r.BitsCount/8) + TryReadFull(r, d.AdditionalInfo) } - return + + return d, r.TryError } -// 
DescriptorExtendedEvent represents an extended event descriptor -// Chapter: 6.2.15 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorExtendedEvent represents an extended event descriptor. Chapter: 6.2.15 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorExtendedEvent struct { - ISO639LanguageCode []byte + Number uint8 // 4 bits. + LastDescriptorNumber uint8 // 4 bits. + ISO639LanguageCode []byte // 3 bytes. Items []*DescriptorExtendedEventItem - LastDescriptorNumber uint8 - Number uint8 Text []byte } -// DescriptorExtendedEventItem represents an extended event item descriptor -// Chapter: 6.2.15 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorExtendedEventItem represents an extended event item descriptor. +// Chapter: 6.2.15 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorExtendedEventItem struct { Content []byte Description []byte } -func newDescriptorExtendedEvent(i *astikit.BytesIterator) (d *DescriptorExtendedEvent, err error) { - // Init - d = &DescriptorExtendedEvent{} - - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Number - d.Number = uint8(b >> 4) +func newDescriptorExtendedEvent(r *bitio.CountReader) (*DescriptorExtendedEvent, error) { + d := &DescriptorExtendedEvent{} - // Last descriptor number - d.LastDescriptorNumber = uint8(b & 0xf) + d.Number = uint8(r.TryReadBits(4)) - // ISO639 language code - if d.ISO639LanguageCode, err = i.NextBytes(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + d.LastDescriptorNumber = uint8(r.TryReadBits(4)) - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + d.ISO639LanguageCode = make([]byte, 3) + TryReadFull(r, d.ISO639LanguageCode) - // Items length - itemsLength := int(b) + itemsLength := r.TryReadByte() + offsetEnd := r.BitsCount/8 + int64(itemsLength) - // Items - offsetEnd := i.Offset() + itemsLength - for i.Offset() < offsetEnd { - // Create item - var item *DescriptorExtendedEventItem - if item, err = newDescriptorExtendedEventItem(i); err != nil { - err = fmt.Errorf("astits: creating extended event item failed: %w", err) - return + for r.BitsCount/8 < offsetEnd { + item, err := newDescriptorExtendedEventItem(r) + if err != nil { + return nil, fmt.Errorf("creating extended event item failed: %w", err) } - // Append item d.Items = append(d.Items, item) } - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Text length - textLength := int(b) + textLength := r.TryReadByte() + d.Text = make([]byte, textLength) + TryReadFull(r, d.Text) - // Text - if d.Text, err = i.NextBytes(textLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - return + return d, r.TryError } -func newDescriptorExtendedEventItem(i *astikit.BytesIterator) (d *DescriptorExtendedEventItem, err error) { - // Init - d = &DescriptorExtendedEventItem{} - - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } +func 
newDescriptorExtendedEventItem(r *bitio.CountReader) (*DescriptorExtendedEventItem, error) { + d := &DescriptorExtendedEventItem{} - // Description length - descriptionLength := int(b) - - // Description - if d.Description, err = i.NextBytes(descriptionLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + descriptionLength := r.TryReadByte() + d.Description = make([]byte, descriptionLength) + TryReadFull(r, d.Description) - // Content length - contentLength := int(b) + contentLength := r.TryReadByte() + d.Content = make([]byte, contentLength) + TryReadFull(r, d.Content) - // Content - if d.Content, err = i.NextBytes(contentLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - return + return d, r.TryError } -// DescriptorExtension represents an extension descriptor -// Chapter: 6.2.16 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorExtension represents an extension descriptor. +// Chapter: 6.2.16 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorExtension struct { SupplementaryAudio *DescriptorExtensionSupplementaryAudio Tag uint8 Unknown *[]byte } -func newDescriptorExtension(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorExtension, err error) { - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Create descriptor - d = &DescriptorExtension{Tag: uint8(b)} +func newDescriptorExtension(r *bitio.CountReader, offsetEnd int64) (*DescriptorExtension, error) { + d := &DescriptorExtension{} + d.Tag = r.TryReadByte() - // Switch on tag + var err error switch d.Tag { case DescriptorTagExtensionSupplementaryAudio: - if d.SupplementaryAudio, err = newDescriptorExtensionSupplementaryAudio(i, offsetEnd); err != nil { - err = fmt.Errorf("astits: parsing extension supplementary audio descriptor failed: %w", err) - return + if d.SupplementaryAudio, err = newDescriptorExtensionSupplementaryAudio(r, offsetEnd); err != nil { + return nil, err } default: - // Get next bytes - var b []byte - if b, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Update unknown - d.Unknown = &b + unknown := make([]byte, offsetEnd-r.BitsCount/8) + TryReadFull(r, unknown) + d.Unknown = &unknown } - return + return d, r.TryError } -// DescriptorExtensionSupplementaryAudio represents a supplementary audio extension descriptor -// Chapter: 6.4.10 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorExtensionSupplementaryAudio represents +// a supplementary audio extension descriptor. +// Chapter: 6.4.10 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorExtensionSupplementaryAudio struct { - EditorialClassification uint8 + EditorialClassification uint8 // 5 bits. HasLanguageCode bool - LanguageCode []byte MixType bool + LanguageCode []byte // 3 bytes. 
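+ // PrivateData holds any remaining bytes of the descriptor payload.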
PrivateData []byte } -func newDescriptorExtensionSupplementaryAudio(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorExtensionSupplementaryAudio, err error) { - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } +func newDescriptorExtensionSupplementaryAudio( + r *bitio.CountReader, offsetEnd int64, +) (*DescriptorExtensionSupplementaryAudio, error) { + d := &DescriptorExtensionSupplementaryAudio{} - // Init - d = &DescriptorExtensionSupplementaryAudio{ - EditorialClassification: uint8(b >> 2 & 0x1f), - HasLanguageCode: b&0x1 > 0, - MixType: b&0x80 > 0, - } + d.MixType = r.TryReadBool() + d.EditorialClassification = uint8(r.TryReadBits(5)) + _ = r.TryReadBool() // Reserved. + d.HasLanguageCode = r.TryReadBool() // Language code if d.HasLanguageCode { - if d.LanguageCode, err = i.NextBytes(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + d.LanguageCode = make([]byte, 3) + TryReadFull(r, d.LanguageCode) } - // Private data - if i.Offset() < offsetEnd { - if d.PrivateData, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + if r.BitsCount/8 < offsetEnd { + d.PrivateData = make([]byte, offsetEnd-r.BitsCount/8) + TryReadFull(r, d.PrivateData) } - return + + return d, r.TryError } // DescriptorISO639LanguageAndAudioType represents an ISO639 language descriptor // https://github.com/gfto/bitstream/blob/master/mpeg/psi/desc_0a.h -// FIXME (barbashov) according to Chapter 2.6.18 ISO/IEC 13818-1:2015 there could be not one, but multiple such descriptors +// FIXME (barbashov) according to Chapter 2.6.18 ISO/IEC 13818-1:2015 +// there could be not one, but multiple such descriptors. type DescriptorISO639LanguageAndAudioType struct { Language []byte Type uint8 } -// In some actual cases, the length is 3 and the language is described in only 2 bytes -func newDescriptorISO639LanguageAndAudioType(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorISO639LanguageAndAudioType, err error) { - // Get next bytes - var bs []byte - if bs, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } +// newDescriptorISO639LanguageAndAudioType In some actual cases, +// the length is 3 and the language is described in only 2 bytes. 
+func newDescriptorISO639LanguageAndAudioType( + r *bitio.CountReader, offsetEnd int64, +) (*DescriptorISO639LanguageAndAudioType, error) { + offset := uint8(offsetEnd - r.BitsCount/8) + language := make([]byte, offset-1) + TryReadFull(r, language) - // Create descriptor - d = &DescriptorISO639LanguageAndAudioType{ - Language: bs[0 : len(bs)-1], - Type: uint8(bs[len(bs)-1]), + d := &DescriptorISO639LanguageAndAudioType{ + Language: language, + Type: r.TryReadByte(), } - return + return d, r.TryError } // DescriptorLocalTimeOffset represents a local time offset descriptor -// Chapter: 6.2.20 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf -type DescriptorLocalTimeOffset struct { - Items []*DescriptorLocalTimeOffsetItem -} +// Chapter: 6.2.20 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +type DescriptorLocalTimeOffset []*DescriptorLocalTimeOffsetItem // DescriptorLocalTimeOffsetItem represents a local time offset item descriptor -// Chapter: 6.2.20 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// Chapter: 6.2.20 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorLocalTimeOffsetItem struct { - CountryCode []byte - CountryRegionID uint8 + CountryCode []byte // 3 bytes. + CountryRegionID uint8 // 6 bits. LocalTimeOffset time.Duration LocalTimeOffsetPolarity bool - NextTimeOffset time.Duration TimeOfChange time.Time + NextTimeOffset time.Duration } -func newDescriptorLocalTimeOffset(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorLocalTimeOffset, err error) { - // Init - d = &DescriptorLocalTimeOffset{} - - // Add items - for i.Offset() < offsetEnd { - // Create item - itm := &DescriptorLocalTimeOffsetItem{} +func newDescriptorLocalTimeOffset(r *bitio.CountReader, offsetEnd int64) (DescriptorLocalTimeOffset, error) { + d := DescriptorLocalTimeOffset{} - // Country code - if itm.CountryCode, err = i.NextBytes(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + for r.BitsCount/8 < offsetEnd { + item := &DescriptorLocalTimeOffsetItem{} + var err error - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + item.CountryCode = make([]byte, 3) + TryReadFull(r, item.CountryCode) - // Country region ID - itm.CountryRegionID = uint8(b >> 2) + item.CountryRegionID = uint8(r.TryReadBits(6)) + _ = r.TryReadBool() // Reserved. 
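+ // Per EN 300 468, a set polarity bit means a negative offset (local time behind UTC).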
+ item.LocalTimeOffsetPolarity = r.TryReadBool() - // Local time offset polarity - itm.LocalTimeOffsetPolarity = b&0x1 > 0 - - // Local time offset - if itm.LocalTimeOffset, err = parseDVBDurationMinutes(i); err != nil { - err = fmt.Errorf("astits: parsing DVB durationminutes failed: %w", err) - return + if item.LocalTimeOffset, err = parseDVBDurationMinutes(r); err != nil { + return nil, fmt.Errorf("parsing localTimeOffset failed: %w", err) } - // Time of change - if itm.TimeOfChange, err = parseDVBTime(i); err != nil { - err = fmt.Errorf("astits: parsing DVB time failed: %w", err) - return + if item.TimeOfChange, err = parseDVBTime(r); err != nil { + return nil, fmt.Errorf("parsing timeOfChange failed: %w", err) } - // Next time offset - if itm.NextTimeOffset, err = parseDVBDurationMinutes(i); err != nil { - err = fmt.Errorf("astits: parsing DVB duration minutes failed: %w", err) - return + if item.NextTimeOffset, err = parseDVBDurationMinutes(r); err != nil { + return nil, fmt.Errorf("parsing NextTimeOffset failed: %w", err) } - // Append item - d.Items = append(d.Items, itm) + d = append(d, item) } - return + return d, r.TryError } -// DescriptorMaximumBitrate represents a maximum bitrate descriptor +// DescriptorMaximumBitrate represents a maximum bitrate descriptor. +// ISO/IEC 13818-1 Chapter: 2.6.26 . type DescriptorMaximumBitrate struct { - Bitrate uint32 // In bytes/second + Bitrate uint32 // In bytes/second. 22 bits. } -func newDescriptorMaximumBitrate(i *astikit.BytesIterator) (d *DescriptorMaximumBitrate, err error) { - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } +func newDescriptorMaximumBitrate(r *bitio.CountReader) (d DescriptorMaximumBitrate, err error) { + r.TryReadBits(2) // Reserved. - // Create descriptor - d = &DescriptorMaximumBitrate{Bitrate: (uint32(bs[0]&0x3f)<<16 | uint32(bs[1])<<8 | uint32(bs[2])) * 50} - return + bitrate := uint32(r.TryReadBits(22)) + return DescriptorMaximumBitrate{Bitrate: bitrate}, r.TryError } -// DescriptorNetworkName represents a network name descriptor -// Chapter: 6.2.27 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorNetworkName represents a network name descriptor. +// Chapter: 6.2.27 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorNetworkName struct { Name []byte } -func newDescriptorNetworkName(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorNetworkName, err error) { - // Create descriptor - d = &DescriptorNetworkName{} - - // Name - if d.Name, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - return +func newDescriptorNetworkName(r *bitio.CountReader, offsetEnd int64) (d DescriptorNetworkName, err error) { + name := make([]byte, offsetEnd-r.BitsCount/8) + TryReadFull(r, name) + return DescriptorNetworkName{Name: name}, r.TryError } -// DescriptorParentalRating represents a parental rating descriptor -// Chapter: 6.2.28 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorParentalRating represents a parental rating descriptor. 
+// Chapter: 6.2.28 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorParentalRating struct { Items []*DescriptorParentalRatingItem } -// DescriptorParentalRatingItem represents a parental rating item descriptor -// Chapter: 6.2.28 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorParentalRatingItem represents a parental rating item descriptor. +// Chapter: 6.2.28 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorParentalRatingItem struct { - CountryCode []byte + CountryCode []byte // 3 bytes. Rating uint8 } -// MinimumAge returns the minimum age for the parental rating +// MinimumAge returns the minimum age for the parental rating. func (d DescriptorParentalRatingItem) MinimumAge() int { // Undefined or user defined ratings if d.Rating == 0 || d.Rating > 0x10 { @@ -843,618 +579,449 @@ func (d DescriptorParentalRatingItem) MinimumAge() int { return int(d.Rating) + 3 } -func newDescriptorParentalRating(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorParentalRating, err error) { - // Create descriptor - d = &DescriptorParentalRating{} +func newDescriptorParentalRating(r *bitio.CountReader, offsetEnd int64) (DescriptorParentalRating, error) { + items := []*DescriptorParentalRatingItem{} - // Add items - for i.Offset() < offsetEnd { - // Get next bytes - var bs []byte - if bs, err = i.NextBytes(4); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + for r.BitsCount/8 < offsetEnd { + country := make([]byte, 3) + TryReadFull(r, country) + + rating := r.TryReadByte() - // Append item - d.Items = append(d.Items, &DescriptorParentalRatingItem{ - CountryCode: bs[:3], - Rating: uint8(bs[3]), + items = append(items, &DescriptorParentalRatingItem{ + CountryCode: country, + Rating: rating, }) } - return -} - -// DescriptorPrivateDataIndicator represents a private data Indicator descriptor -type DescriptorPrivateDataIndicator struct { - Indicator uint32 + return DescriptorParentalRating{Items: items}, r.TryError } -func newDescriptorPrivateDataIndicator(i *astikit.BytesIterator) (d *DescriptorPrivateDataIndicator, err error) { - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(4); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } +// DescriptorPrivateDataIndicator represents a private data Indicator descriptor. +type DescriptorPrivateDataIndicator uint32 - // Create descriptor - d = &DescriptorPrivateDataIndicator{Indicator: uint32(bs[0])<<24 | uint32(bs[1])<<16 | uint32(bs[2])<<8 | uint32(bs[3])} - return +func newDescriptorPrivateDataIndicator(r *bitio.CountReader) (DescriptorPrivateDataIndicator, error) { + data := uint32(r.TryReadBits(32)) + return DescriptorPrivateDataIndicator(data), r.TryError } -// DescriptorPrivateDataSpecifier represents a private data specifier descriptor +// DescriptorPrivateDataSpecifier represents a private data specifier descriptor. 
type DescriptorPrivateDataSpecifier struct { Specifier uint32 } -func newDescriptorPrivateDataSpecifier(i *astikit.BytesIterator) (d *DescriptorPrivateDataSpecifier, err error) { - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(4); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Create descriptor - d = &DescriptorPrivateDataSpecifier{Specifier: uint32(bs[0])<<24 | uint32(bs[1])<<16 | uint32(bs[2])<<8 | uint32(bs[3])} - return +func newDescriptorPrivateDataSpecifier(r *bitio.CountReader) (DescriptorPrivateDataSpecifier, error) { + specifier := uint32(r.TryReadBits(32)) + return DescriptorPrivateDataSpecifier{Specifier: specifier}, r.TryError } -// DescriptorRegistration represents a registration descriptor -// Page: 84 | http://ecee.colorado.edu/~ecen5653/ecen5653/papers/iso13818-1.pdf +// DescriptorRegistration represents a registration descriptor. +// Page: 84 | Link: +// http://ecee.colorado.edu/~ecen5653/ecen5653/papers/iso13818-1.pdf type DescriptorRegistration struct { - AdditionalIdentificationInfo []byte FormatIdentifier uint32 + AdditionalIdentificationInfo []byte } -func newDescriptorRegistration(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorRegistration, err error) { - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(4); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } +func newDescriptorRegistration(r *bitio.CountReader, offsetEnd int64) (*DescriptorRegistration, error) { + d := &DescriptorRegistration{} - // Create descriptor - d = &DescriptorRegistration{FormatIdentifier: uint32(bs[0])<<24 | uint32(bs[1])<<16 | uint32(bs[2])<<8 | uint32(bs[3])} + d.FormatIdentifier = uint32(r.TryReadBits(32)) - // Additional identification info - if i.Offset() < offsetEnd { - if d.AdditionalIdentificationInfo, err = i.NextBytes(offsetEnd - i.Offset()); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + if r.BitsCount/8 < offsetEnd { + d.AdditionalIdentificationInfo = make([]byte, offsetEnd-r.BitsCount/8) + TryReadFull(r, d.AdditionalIdentificationInfo) } - return + + return d, r.TryError } -// DescriptorService represents a service descriptor -// Chapter: 6.2.33 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorService represents a service descriptor. 
+// Chapter: 6.2.33 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorService struct { - Name []byte - Provider []byte Type uint8 + Provider []byte + Name []byte } -func newDescriptorService(i *astikit.BytesIterator) (d *DescriptorService, err error) { - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Create descriptor - d = &DescriptorService{Type: uint8(b)} - - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Provider length - providerLength := int(b) +func newDescriptorService(r *bitio.CountReader) (*DescriptorService, error) { + d := &DescriptorService{} - // Provider - if d.Provider, err = i.NextBytes(providerLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + d.Type = r.TryReadByte() - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + providerLength := r.TryReadByte() + d.Provider = make([]byte, providerLength) + TryReadFull(r, d.Provider) - // Name length - nameLength := int(b) + nameLength := r.TryReadByte() + d.Name = make([]byte, nameLength) + TryReadFull(r, d.Name) - // Name - if d.Name, err = i.NextBytes(nameLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - return + return d, r.TryError } -// DescriptorShortEvent represents a short event descriptor -// Chapter: 6.2.37 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorShortEvent represents a short event descriptor. +// Chapter: 6.2.37 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorShortEvent struct { + Language []byte // 3 bytes. 
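+ // EventName and Text are each preceded by a one-byte length on the wire.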
EventName []byte - Language []byte Text []byte } -func newDescriptorShortEvent(i *astikit.BytesIterator) (d *DescriptorShortEvent, err error) { - // Create descriptor - d = &DescriptorShortEvent{} +func newDescriptorShortEvent(r *bitio.CountReader) (*DescriptorShortEvent, error) { + d := &DescriptorShortEvent{} - // Language - if d.Language, err = i.NextBytes(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + d.Language = make([]byte, 3) + TryReadFull(r, d.Language) - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + eventLength := r.TryReadByte() + d.EventName = make([]byte, eventLength) + TryReadFull(r, d.EventName) - // Event length - eventLength := int(b) - - // Event name - if d.EventName, err = i.NextBytes(eventLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + textLength := r.TryReadByte() + d.Text = make([]byte, textLength) + TryReadFull(r, d.Text) - // Text length - textLength := int(b) - - // Text - if d.Text, err = i.NextBytes(textLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - return + return d, r.TryError } -// DescriptorStreamIdentifier represents a stream identifier descriptor -// Chapter: 6.2.39 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorStreamIdentifier represents a stream identifier descriptor. +// Chapter: 6.2.39 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorStreamIdentifier struct { ComponentTag uint8 } -func newDescriptorStreamIdentifier(i *astikit.BytesIterator) (d *DescriptorStreamIdentifier, err error) { - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - d = &DescriptorStreamIdentifier{ComponentTag: uint8(b)} - return +func newDescriptorStreamIdentifier(r *bitio.CountReader) (DescriptorStreamIdentifier, error) { + identifier, err := r.ReadByte() + return DescriptorStreamIdentifier{ComponentTag: identifier}, err } -// DescriptorSubtitling represents a subtitling descriptor -// Chapter: 6.2.41 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorSubtitling represents a subtitling descriptor. +// Chapter: 6.2.41 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorSubtitling struct { Items []*DescriptorSubtitlingItem } -// DescriptorSubtitlingItem represents subtitling descriptor item -// Chapter: 6.2.41 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorSubtitlingItem represents subtitling descriptor item. +// Chapter: 6.2.41 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorSubtitlingItem struct { - AncillaryPageID uint16 - CompositionPageID uint16 - Language []byte + Language []byte // 3 bytes. 
Type uint8 + CompositionPageID uint16 + AncillaryPageID uint16 } -func newDescriptorSubtitling(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorSubtitling, err error) { - // Create descriptor - d = &DescriptorSubtitling{} - - // Loop - for i.Offset() < offsetEnd { - // Create item - itm := &DescriptorSubtitlingItem{} +func newDescriptorSubtitling(r *bitio.CountReader, offsetEnd int64) (DescriptorSubtitling, error) { + items := []*DescriptorSubtitlingItem{} - // Language - if itm.Language, err = i.NextBytes(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + for r.BitsCount/8 < offsetEnd { + item := &DescriptorSubtitlingItem{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + item.Language = make([]byte, 3) + TryReadFull(r, item.Language) - // Type - itm.Type = uint8(b) + item.Type = r.TryReadByte() - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + item.CompositionPageID = uint16(r.TryReadBits(16)) - // Composition page ID - itm.CompositionPageID = uint16(bs[0])<<8 | uint16(bs[1]) - - // Get next bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + item.AncillaryPageID = uint16(r.TryReadBits(16)) - // Ancillary page ID - itm.AncillaryPageID = uint16(bs[0])<<8 | uint16(bs[1]) - - // Append item - d.Items = append(d.Items, itm) + items = append(items, item) } - return + + return DescriptorSubtitling{Items: items}, r.TryError } -// DescriptorTeletext represents a teletext descriptor -// Chapter: 6.2.43 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorTeletext represents a teletext descriptor. +// Chapter: 6.2.43 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorTeletext struct { Items []*DescriptorTeletextItem } -// DescriptorTeletextItem represents a teletext descriptor item -// Chapter: 6.2.43 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorTeletextItem represents a teletext descriptor item. +// Chapter: 6.2.43 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorTeletextItem struct { Language []byte - Magazine uint8 + Type uint8 // 5 bits. + Magazine uint8 // 3 bits. 
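+ // Page is decoded from two BCD nibbles: tens in the high nibble, units in the low one.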
Page uint8 - Type uint8 } -func newDescriptorTeletext(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorTeletext, err error) { - // Create descriptor - d = &DescriptorTeletext{} - - // Loop - for i.Offset() < offsetEnd { - // Create item - itm := &DescriptorTeletextItem{} +func newDescriptorTeletext(r *bitio.CountReader, offsetEnd int64) (DescriptorTeletext, error) { + items := []*DescriptorTeletextItem{} - // Language - if itm.Language, err = i.NextBytes(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + for r.BitsCount/8 < offsetEnd { + item := &DescriptorTeletextItem{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + item.Language = make([]byte, 3) + TryReadFull(r, item.Language) - // Type - itm.Type = uint8(b) >> 3 + item.Type = uint8(r.TryReadBits(5)) - // Magazine - itm.Magazine = uint8(b & 0x7) + item.Magazine = uint8(r.TryReadBits(3)) - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + b := r.TryReadByte() - // Page - itm.Page = uint8(b)>>4*10 + uint8(b&0xf) + // Optimization? + item.Page = b>>4*10 + b&0xf + // w.TryWriteBits(item.Page/10, 4) + // w.TryWriteBits(item.Page%10, 4) - // Append item - d.Items = append(d.Items, itm) + items = append(items, item) } - return + return DescriptorTeletext{Items: items}, r.TryError } +// DescriptorUnknown . type DescriptorUnknown struct { - Content []byte Tag uint8 + Content []byte } -func newDescriptorUnknown(i *astikit.BytesIterator, tag, length uint8) (d *DescriptorUnknown, err error) { - // Create descriptor - d = &DescriptorUnknown{Tag: tag} +func newDescriptorUnknown(r *bitio.CountReader, tag, length uint8) (*DescriptorUnknown, error) { + d := &DescriptorUnknown{Tag: tag} - // Get next bytes - if d.Content, err = i.NextBytes(int(length)); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - return + d.Content = make([]byte, length) + TryReadFull(r, d.Content) + return d, r.TryError } -// DescriptorVBIData represents a VBI data descriptor -// Chapter: 6.2.47 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf -type DescriptorVBIData struct { - Services []*DescriptorVBIDataService -} +// DescriptorVBIData represents a VBI data descriptor. +// Chapter: 6.2.47 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +type DescriptorVBIData []*DescriptorVBIDataService -// DescriptorVBIDataService represents a vbi data service descriptor -// Chapter: 6.2.47 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorVBIDataService represents a vbi data service descriptor. +// Chapter: 6.2.47 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorVBIDataService struct { DataServiceID uint8 Descriptors []*DescriptorVBIDataDescriptor } -// DescriptorVBIDataItem represents a vbi data descriptor item -// Chapter: 6.2.47 | Link: https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf +// DescriptorVBIDataDescriptor represents a vbi data descriptor item. 
+// Chapter: 6.2.47 | Link: +// https://www.etsi.org/deliver/etsi_en/300400_300499/300468/01.15.01_60/en_300468v011501p.pdf type DescriptorVBIDataDescriptor struct { FieldParity bool - LineOffset uint8 + LineOffset uint8 // 5 bits. } -func newDescriptorVBIData(i *astikit.BytesIterator, offsetEnd int) (d *DescriptorVBIData, err error) { - // Create descriptor - d = &DescriptorVBIData{} +func newDescriptorVBIData(r *bitio.CountReader, offsetEnd int64) DescriptorVBIData { + d := DescriptorVBIData{} - // Loop - for i.Offset() < offsetEnd { - // Create service + for r.BitsCount/8 < offsetEnd { srv := &DescriptorVBIDataService{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + srv.DataServiceID = r.TryReadByte() - // Data service ID - srv.DataServiceID = uint8(b) + dataServiceDescriptorLength := r.TryReadByte() - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Data service descriptor length - dataServiceDescriptorLength := int(b) - - // Data service descriptor - offsetDataEnd := i.Offset() + dataServiceDescriptorLength - for i.Offset() < offsetDataEnd { + offsetDataEnd := r.BitsCount/8 + int64(dataServiceDescriptorLength) + for r.BitsCount/8 < offsetDataEnd { if srv.DataServiceID == VBIDataServiceIDClosedCaptioning || srv.DataServiceID == VBIDataServiceIDEBUTeletext || srv.DataServiceID == VBIDataServiceIDInvertedTeletext || srv.DataServiceID == VBIDataServiceIDMonochrome442Samples || srv.DataServiceID == VBIDataServiceIDVPS || srv.DataServiceID == VBIDataServiceIDWSS { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + _ = r.TryReadBits(2) // Reserved. - // Append data srv.Descriptors = append(srv.Descriptors, &DescriptorVBIDataDescriptor{ - FieldParity: b&0x20 > 0, - LineOffset: uint8(b & 0x1f), + FieldParity: r.TryReadBool(), + LineOffset: uint8(r.TryReadBits(5)), }) } } - - // Append service - d.Services = append(d.Services, srv) + d = append(d, srv) } - return + + return d } -// parseDescriptors parses descriptors -func parseDescriptors(i *astikit.BytesIterator) (o []*Descriptor, err error) { - // Get next 2 bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return +// parseDescriptors parses descriptors. 
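+// It reads the 12-bit descriptors length, then one descriptor at a time: an 8-bit tag, +// an 8-bit length and the tag-specific payload, seeking to the announced end of each +// descriptor in case its parser reads less than declared.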
+func parseDescriptors(r *bitio.CountReader) ([]*Descriptor, error) { + var o []*Descriptor + + length := int64(r.TryReadBits(12)) + + if length <= 0 { + return o, nil } - // Get length - length := int(uint16(bs[0]&0xf)<<8 | uint16(bs[1])) + offsetEnd := r.BitsCount/8 + length + for r.BitsCount/8 < offsetEnd { + d := &Descriptor{ + Tag: r.TryReadByte(), + Length: r.TryReadByte(), + } - // Loop - if length > 0 { - offsetEnd := i.Offset() + length - for i.Offset() < offsetEnd { - // Get next 2 bytes - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } + if r.TryError != nil { + return nil, r.TryError + } - // Create descriptor - d := &Descriptor{ - Length: uint8(bs[1]), - Tag: uint8(bs[0]), - } + if d.Length <= 0 { + continue + } - // Parse data - if d.Length > 0 { - // Unfortunately there's no way to be sure the real descriptor length is the same as the one indicated - // previously therefore we must fetch bytes in descriptor functions and seek at the end - offsetDescriptorEnd := i.Offset() + int(d.Length) - - // User defined - if d.Tag >= 0x80 && d.Tag <= 0xfe { - // Get next bytes - if d.UserDefined, err = i.NextBytes(int(d.Length)); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - } else { - // Switch on tag - switch d.Tag { - case DescriptorTagAC3: - if d.AC3, err = newDescriptorAC3(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing AC3 descriptor failed: %w", err) - return - } - case DescriptorTagAVCVideo: - if d.AVCVideo, err = newDescriptorAVCVideo(i); err != nil { - err = fmt.Errorf("astits: parsing AVC Video descriptor failed: %w", err) - return - } - case DescriptorTagComponent: - if d.Component, err = newDescriptorComponent(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Component descriptor failed: %w", err) - return - } - case DescriptorTagContent: - if d.Content, err = newDescriptorContent(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Content descriptor failed: %w", err) - return - } - case DescriptorTagDataStreamAlignment: - if d.DataStreamAlignment, err = newDescriptorDataStreamAlignment(i); err != nil { - err = fmt.Errorf("astits: parsing Data Stream Alignment descriptor failed: %w", err) - return - } - case DescriptorTagEnhancedAC3: - if d.EnhancedAC3, err = newDescriptorEnhancedAC3(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Enhanced AC3 descriptor failed: %w", err) - return - } - case DescriptorTagExtendedEvent: - if d.ExtendedEvent, err = newDescriptorExtendedEvent(i); err != nil { - err = fmt.Errorf("astits: parsing Extended event descriptor failed: %w", err) - return - } - case DescriptorTagExtension: - if d.Extension, err = newDescriptorExtension(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Extension descriptor failed: %w", err) - return - } - case DescriptorTagISO639LanguageAndAudioType: - if d.ISO639LanguageAndAudioType, err = newDescriptorISO639LanguageAndAudioType(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing ISO639 Language and Audio Type descriptor failed: %w", err) - return - } - case DescriptorTagLocalTimeOffset: - if d.LocalTimeOffset, err = newDescriptorLocalTimeOffset(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Local Time Offset descriptor failed: %w", err) - return - } - case DescriptorTagMaximumBitrate: - if d.MaximumBitrate, err = 
newDescriptorMaximumBitrate(i); err != nil { - err = fmt.Errorf("astits: parsing Maximum Bitrate descriptor failed: %w", err) - return - } - case DescriptorTagNetworkName: - if d.NetworkName, err = newDescriptorNetworkName(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Network Name descriptor failed: %w", err) - return - } - case DescriptorTagParentalRating: - if d.ParentalRating, err = newDescriptorParentalRating(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Parental Rating descriptor failed: %w", err) - return - } - case DescriptorTagPrivateDataIndicator: - if d.PrivateDataIndicator, err = newDescriptorPrivateDataIndicator(i); err != nil { - err = fmt.Errorf("astits: parsing Private Data Indicator descriptor failed: %w", err) - return - } - case DescriptorTagPrivateDataSpecifier: - if d.PrivateDataSpecifier, err = newDescriptorPrivateDataSpecifier(i); err != nil { - err = fmt.Errorf("astits: parsing Private Data Specifier descriptor failed: %w", err) - return - } - case DescriptorTagRegistration: - if d.Registration, err = newDescriptorRegistration(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Registration descriptor failed: %w", err) - return - } - case DescriptorTagService: - if d.Service, err = newDescriptorService(i); err != nil { - err = fmt.Errorf("astits: parsing Service descriptor failed: %w", err) - return - } - case DescriptorTagShortEvent: - if d.ShortEvent, err = newDescriptorShortEvent(i); err != nil { - err = fmt.Errorf("astits: parsing Short Event descriptor failed: %w", err) - return - } - case DescriptorTagStreamIdentifier: - if d.StreamIdentifier, err = newDescriptorStreamIdentifier(i); err != nil { - err = fmt.Errorf("astits: parsing Stream Identifier descriptor failed: %w", err) - return - } - case DescriptorTagSubtitling: - if d.Subtitling, err = newDescriptorSubtitling(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Subtitling descriptor failed: %w", err) - return - } - case DescriptorTagTeletext: - if d.Teletext, err = newDescriptorTeletext(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing Teletext descriptor failed: %w", err) - return - } - case DescriptorTagVBIData: - if d.VBIData, err = newDescriptorVBIData(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing VBI Date descriptor failed: %w", err) - return - } - case DescriptorTagVBITeletext: - if d.VBITeletext, err = newDescriptorTeletext(i, offsetDescriptorEnd); err != nil { - err = fmt.Errorf("astits: parsing VBI Teletext descriptor failed: %w", err) - return - } - default: - if d.Unknown, err = newDescriptorUnknown(i, d.Tag, d.Length); err != nil { - err = fmt.Errorf("astits: parsing unknown descriptor failed: %w", err) - return - } - } - } - - // Seek in iterator to make sure we move to the end of the descriptor since its content may be - // corrupted - i.Seek(offsetDescriptorEnd) + // Parse data. + // Unfortunately there's no way to be sure the real descriptor + // length is the same as the one indicated previously therefore + // we must fetch bytes in descriptor functions and seek at the end. + offsetDescriptorEnd := r.BitsCount/8 + int64(d.Length) + + // User defined + if d.Tag >= 0x80 && d.Tag <= 0xfe { + d.UserDefined = make([]byte, d.Length) + TryReadFull(r, d.UserDefined) + + // Make sure we move to the end of the descriptor + // since its content may be corrupted. 
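+ // r.BitsCount counts bits read so far, hence the division by 8 to get the byte offset.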
+ if offsetDescriptorEnd > r.BitsCount/8 { + skip := make([]byte, offsetDescriptorEnd-r.BitsCount/8) + TryReadFull(r, skip) } + o = append(o, d) + continue + } + + err := parseDescriptor(d, r, offsetDescriptorEnd) + if err != nil { + return nil, err } + + o = append(o, d) } - return + return o, r.TryError } -func calcDescriptorUserDefinedLength(d []byte) uint8 { - return uint8(len(d)) -} +func parseDescriptor( //nolint:funlen,gocognit,gocyclo + d *Descriptor, + r *bitio.CountReader, + offsetDescriptorEnd int64, +) error { + var err error -func writeDescriptorUserDefined(w *astikit.BitsWriter, d []byte) error { - b := astikit.NewBitsWriterBatch(w) + switch d.Tag { + case DescriptorTagAC3: + if d.AC3, err = newDescriptorAC3(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing AC3 descriptor failed: %w", err) + } + case DescriptorTagAVCVideo: + if d.AVCVideo, err = newDescriptorAVCVideo(r); err != nil { + return fmt.Errorf("parsing AVC Video descriptor failed: %w", err) + } + case DescriptorTagComponent: + if d.Component, err = newDescriptorComponent(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Component descriptor failed: %w", err) + } + case DescriptorTagContent: + if d.Content, err = newDescriptorContent(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Content descriptor failed: %w", err) + } + case DescriptorTagDataStreamAlignment: + if d.DataStreamAlignment, err = newDescriptorDataStreamAlignment(r); err != nil { + return fmt.Errorf("parsing Data Stream Alignment descriptor failed: %w", err) + } + case DescriptorTagEnhancedAC3: + if d.EnhancedAC3, err = newDescriptorEnhancedAC3(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Enhanced AC3 descriptor failed: %w", err) + } + case DescriptorTagExtendedEvent: + if d.ExtendedEvent, err = newDescriptorExtendedEvent(r); err != nil { + return fmt.Errorf("parsing Extended event descriptor failed: %w", err) + } + case DescriptorTagExtension: + if d.Extension, err = newDescriptorExtension(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Extension descriptor failed: %w", err) + } + case DescriptorTagISO639LanguageAndAudioType: + if d.ISO639LanguageAndAudioType, err = newDescriptorISO639LanguageAndAudioType(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing ISO639 Language and Audio Type descriptor failed: %w", err) + } + case DescriptorTagLocalTimeOffset: + if d.LocalTimeOffset, err = newDescriptorLocalTimeOffset(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Local Time Offset descriptor failed: %w", err) + } + case DescriptorTagMaximumBitrate: + if d.MaximumBitrate, err = newDescriptorMaximumBitrate(r); err != nil { + return fmt.Errorf("parsing Maximum Bitrate descriptor failed: %w", err) + } + case DescriptorTagNetworkName: + if d.NetworkName, err = newDescriptorNetworkName(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Network Name descriptor failed: %w", err) + } + case DescriptorTagParentalRating: + if d.ParentalRating, err = newDescriptorParentalRating(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Parental Rating descriptor failed: %w", err) + } + case DescriptorTagPrivateDataIndicator: + if d.PrivateDataIndicator, err = newDescriptorPrivateDataIndicator(r); err != nil { + return fmt.Errorf("parsing Private Data Indicator descriptor failed: %w", err) + } + case DescriptorTagPrivateDataSpecifier: + if d.PrivateDataSpecifier, err = newDescriptorPrivateDataSpecifier(r); err != nil { 
+ return fmt.Errorf("parsing Private Data Specifier descriptor failed: %w", err) + } + case DescriptorTagRegistration: + if d.Registration, err = newDescriptorRegistration(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Registration descriptor failed: %w", err) + } + case DescriptorTagService: + if d.Service, err = newDescriptorService(r); err != nil { + return fmt.Errorf("parsing Service descriptor failed: %w", err) + } + case DescriptorTagShortEvent: + if d.ShortEvent, err = newDescriptorShortEvent(r); err != nil { + return fmt.Errorf("parsing Short Event descriptor failed: %w", err) + } + case DescriptorTagStreamIdentifier: + if d.StreamIdentifier, err = newDescriptorStreamIdentifier(r); err != nil { + return fmt.Errorf("parsing Stream Identifier descriptor failed: %w", err) + } + case DescriptorTagSubtitling: + if d.Subtitling, err = newDescriptorSubtitling(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Subtitling descriptor failed: %w", err) + } + case DescriptorTagTeletext: + if d.Teletext, err = newDescriptorTeletext(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing Teletext descriptor failed: %w", err) + } + case DescriptorTagVBIData: + d.VBIData = newDescriptorVBIData(r, offsetDescriptorEnd) + case DescriptorTagVBITeletext: + if d.VBITeletext, err = newDescriptorTeletext(r, offsetDescriptorEnd); err != nil { + return fmt.Errorf("parsing VBI Teletext descriptor failed: %w", err) + } + default: + if d.Unknown, err = newDescriptorUnknown(r, d.Tag, d.Length); err != nil { + return fmt.Errorf("parsing unknown descriptor failed: %w", err) + } + } - b.Write(d) + // Make sure we move to the end of the descriptor + // since its content may be corrupted. + if offsetDescriptorEnd > r.BitsCount/8 { + seek := make([]byte, offsetDescriptorEnd-r.BitsCount/8) + TryReadFull(r, seek) + } + + return nil +} - return b.Err() +func calcDescriptorUserDefinedLength(d []byte) uint8 { + return uint8(len(d)) } func calcDescriptorAC3Length(d *DescriptorAC3) uint8 { - ret := 1 // flags + ret := 1 // flags. if d.HasComponentType { ret++ @@ -1474,105 +1041,92 @@ func calcDescriptorAC3Length(d *DescriptorAC3) uint8 { return uint8(ret) } -func writeDescriptorAC3(w *astikit.BitsWriter, d *DescriptorAC3) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.HasComponentType) - b.Write(d.HasBSID) - b.Write(d.HasMainID) - b.Write(d.HasASVC) - b.WriteN(uint8(0xff), 4) +func writeDescriptorAC3(w *bitio.Writer, d *DescriptorAC3) error { + w.TryWriteBool(d.HasComponentType) + w.TryWriteBool(d.HasBSID) + w.TryWriteBool(d.HasMainID) + w.TryWriteBool(d.HasASVC) + w.TryWriteBits(0xff, 4) // Reserved. 
if d.HasComponentType { - b.Write(d.ComponentType) + w.TryWriteByte(d.ComponentType) } if d.HasBSID { - b.Write(d.BSID) + w.TryWriteByte(d.BSID) } if d.HasMainID { - b.Write(d.MainID) + w.TryWriteByte(d.MainID) } if d.HasASVC { - b.Write(d.ASVC) + w.TryWriteByte(d.ASVC) } - b.Write(d.AdditionalInfo) + w.TryWrite(d.AdditionalInfo) - return b.Err() + return w.TryError } func calcDescriptorAVCVideoLength(d *DescriptorAVCVideo) uint8 { return 4 } -func writeDescriptorAVCVideo(w *astikit.BitsWriter, d *DescriptorAVCVideo) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.ProfileIDC) +func writeDescriptorAVCVideo(w *bitio.Writer, d *DescriptorAVCVideo) error { + w.TryWriteByte(d.ProfileIDC) - b.Write(d.ConstraintSet0Flag) - b.Write(d.ConstraintSet1Flag) - b.Write(d.ConstraintSet2Flag) - b.WriteN(d.CompatibleFlags, 5) + w.TryWriteBool(d.ConstraintSet0Flag) + w.TryWriteBool(d.ConstraintSet1Flag) + w.TryWriteBool(d.ConstraintSet2Flag) + w.TryWriteBits(uint64(d.CompatibleFlags), 5) - b.Write(d.LevelIDC) + w.TryWriteByte(d.LevelIDC) - b.Write(d.AVCStillPresent) - b.Write(d.AVC24HourPictureFlag) - b.WriteN(uint8(0xff), 6) + w.TryWriteBool(d.AVCStillPresent) + w.TryWriteBool(d.AVC24HourPictureFlag) + w.TryWriteBits(uint64(0xff), 6) // Reserved. - return b.Err() + return w.TryError } func calcDescriptorComponentLength(d *DescriptorComponent) uint8 { return uint8(6 + len(d.Text)) } -func writeDescriptorComponent(w *astikit.BitsWriter, d *DescriptorComponent) error { - b := astikit.NewBitsWriterBatch(w) +func writeDescriptorComponent(w *bitio.Writer, d *DescriptorComponent) error { + w.TryWriteBits(uint64(d.StreamContentExt), 4) + w.TryWriteBits(uint64(d.StreamContent), 4) - b.WriteN(d.StreamContentExt, 4) - b.WriteN(d.StreamContent, 4) + w.TryWriteByte(d.ComponentType) + w.TryWriteByte(d.ComponentTag) - b.Write(d.ComponentType) - b.Write(d.ComponentTag) + w.TryWrite(d.ISO639LanguageCode) - b.WriteBytesN(d.ISO639LanguageCode, 3, 0) + w.TryWrite(d.Text) - b.Write(d.Text) - - return b.Err() + return w.TryError } -func calcDescriptorContentLength(d *DescriptorContent) uint8 { +func calcDescriptorContentLength(d DescriptorContent) uint8 { return uint8(2 * len(d.Items)) } -func writeDescriptorContent(w *astikit.BitsWriter, d *DescriptorContent) error { - b := astikit.NewBitsWriterBatch(w) - +func writeDescriptorContent(w *bitio.Writer, d DescriptorContent) error { for _, item := range d.Items { - b.WriteN(item.ContentNibbleLevel1, 4) - b.WriteN(item.ContentNibbleLevel2, 4) - b.Write(item.UserByte) + w.TryWriteBits(uint64(item.ContentNibbleLevel1), 4) + w.TryWriteBits(uint64(item.ContentNibbleLevel2), 4) + w.TryWriteByte(item.UserByte) } - - return b.Err() + return w.TryError } -func calcDescriptorDataStreamAlignmentLength(d *DescriptorDataStreamAlignment) uint8 { +func calcDescriptorDataStreamAlignmentLength(d DescriptorDataStreamAlignment) uint8 { return 1 } -func writeDescriptorDataStreamAlignment(w *astikit.BitsWriter, d *DescriptorDataStreamAlignment) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.Type) - - return b.Err() +func writeDescriptorDataStreamAlignment(w *bitio.Writer, d DescriptorDataStreamAlignment) error { + return w.WriteByte(uint8(d)) } func calcDescriptorEnhancedAC3Length(d *DescriptorEnhancedAC3) uint8 { - ret := 1 // flags + ret := 1 // flags. 
if d.HasComponentType { ret++ @@ -1601,88 +1155,84 @@ func calcDescriptorEnhancedAC3Length(d *DescriptorEnhancedAC3) uint8 { return uint8(ret) } -func writeDescriptorEnhancedAC3(w *astikit.BitsWriter, d *DescriptorEnhancedAC3) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.HasComponentType) - b.Write(d.HasBSID) - b.Write(d.HasMainID) - b.Write(d.HasASVC) - b.Write(d.MixInfoExists) - b.Write(d.HasSubStream1) - b.Write(d.HasSubStream2) - b.Write(d.HasSubStream3) +func writeDescriptorEnhancedAC3(w *bitio.Writer, d *DescriptorEnhancedAC3) error { + w.TryWriteBool(d.HasComponentType) + w.TryWriteBool(d.HasBSID) + w.TryWriteBool(d.HasMainID) + w.TryWriteBool(d.HasASVC) + w.TryWriteBool(d.MixInfoExists) + w.TryWriteBool(d.HasSubStream1) + w.TryWriteBool(d.HasSubStream2) + w.TryWriteBool(d.HasSubStream3) if d.HasComponentType { - b.Write(d.ComponentType) + w.TryWriteByte(d.ComponentType) } if d.HasBSID { - b.Write(d.BSID) + w.TryWriteByte(d.BSID) } if d.HasMainID { - b.Write(d.MainID) + w.TryWriteByte(d.MainID) } if d.HasASVC { - b.Write(d.ASVC) + w.TryWriteByte(d.ASVC) } if d.HasSubStream1 { - b.Write(d.SubStream1) + w.TryWriteByte(d.SubStream1) } if d.HasSubStream2 { - b.Write(d.SubStream2) + w.TryWriteByte(d.SubStream2) } if d.HasSubStream3 { - b.Write(d.SubStream3) + w.TryWriteByte(d.SubStream3) } - b.Write(d.AdditionalInfo) + w.TryWrite(d.AdditionalInfo) - return b.Err() + return w.TryError } func calcDescriptorExtendedEventLength(d *DescriptorExtendedEvent) (descriptorLength, lengthOfItems uint8) { - ret := 1 + 3 + 1 // numbers, language and items length + ret := 1 + 3 + 1 // numbers, language and items length. itemsRet := 0 for _, item := range d.Items { - itemsRet += 1 // description length + itemsRet++ // description length itemsRet += len(item.Description) - itemsRet += 1 // content length + itemsRet++ // content length itemsRet += len(item.Content) } ret += itemsRet - ret += 1 // text length + ret++ // text length ret += len(d.Text) return uint8(ret), uint8(itemsRet) } -func writeDescriptorExtendedEvent(w *astikit.BitsWriter, d *DescriptorExtendedEvent) error { - b := astikit.NewBitsWriterBatch(w) - +func writeDescriptorExtendedEvent(w *bitio.Writer, d *DescriptorExtendedEvent) error { var lengthOfItems uint8 _, lengthOfItems = calcDescriptorExtendedEventLength(d) - b.WriteN(d.Number, 4) - b.WriteN(d.LastDescriptorNumber, 4) + w.TryWriteBits(uint64(d.Number), 4) + w.TryWriteBits(uint64(d.LastDescriptorNumber), 4) - b.WriteBytesN(d.ISO639LanguageCode, 3, 0) + w.TryWrite(d.ISO639LanguageCode) - b.Write(lengthOfItems) + w.TryWriteByte(lengthOfItems) for _, item := range d.Items { - b.Write(uint8(len(item.Description))) - b.Write(item.Description) - b.Write(uint8(len(item.Content))) - b.Write(item.Content) + w.TryWriteByte(uint8(len(item.Description))) + w.TryWrite(item.Description) + w.TryWriteByte(uint8(len(item.Content))) + w.TryWrite(item.Content) } - b.Write(uint8(len(d.Text))) - b.Write(d.Text) + w.TryWriteByte(uint8(len(d.Text))) + w.TryWrite(d.Text) - return b.Err() + return w.TryError } func calcDescriptorExtensionSupplementaryAudioLength(d *DescriptorExtensionSupplementaryAudio) int { @@ -1695,7 +1245,7 @@ func calcDescriptorExtensionSupplementaryAudioLength(d *DescriptorExtensionSuppl } func calcDescriptorExtensionLength(d *DescriptorExtension) uint8 { - ret := 1 // tag + ret := 1 // tag. 
switch d.Tag { case DescriptorTagExtensionSupplementaryAudio: @@ -1709,27 +1259,25 @@ func calcDescriptorExtensionLength(d *DescriptorExtension) uint8 { return uint8(ret) } -func writeDescriptorExtensionSupplementaryAudio(w *astikit.BitsWriter, d *DescriptorExtensionSupplementaryAudio) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.MixType) - b.WriteN(d.EditorialClassification, 5) - b.Write(true) // reserved - b.Write(d.HasLanguageCode) +func writeDescriptorExtensionSupplementaryAudio(w *bitio.Writer, d *DescriptorExtensionSupplementaryAudio) error { + w.TryWriteBool(d.MixType) + w.TryWriteBits(uint64(d.EditorialClassification), 5) + w.TryWriteBool(true) // Reserved. + w.TryWriteBool(d.HasLanguageCode) if d.HasLanguageCode { - b.WriteBytesN(d.LanguageCode, 3, 0) + w.TryWrite(d.LanguageCode) } - b.Write(d.PrivateData) + w.TryWrite(d.PrivateData) - return b.Err() + return w.TryError } -func writeDescriptorExtension(w *astikit.BitsWriter, d *DescriptorExtension) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.Tag) +func writeDescriptorExtension(w *bitio.Writer, d *DescriptorExtension) error { + if err := w.WriteByte(d.Tag); err != nil { + return err + } switch d.Tag { case DescriptorTagExtensionSupplementaryAudio: @@ -1739,129 +1287,108 @@ func writeDescriptorExtension(w *astikit.BitsWriter, d *DescriptorExtension) err } default: if d.Unknown != nil { - b.Write(*d.Unknown) + if _, err := w.Write(*d.Unknown); err != nil { + return err + } } } - - return b.Err() + return nil } func calcDescriptorISO639LanguageAndAudioTypeLength(d *DescriptorISO639LanguageAndAudioType) uint8 { - return 3 + 1 // language code + type + return 3 + 1 // language code + type. } -func writeDescriptorISO639LanguageAndAudioType(w *astikit.BitsWriter, d *DescriptorISO639LanguageAndAudioType) error { - b := astikit.NewBitsWriterBatch(w) - - b.WriteBytesN(d.Language, 3, 0) - b.Write(d.Type) +func writeDescriptorISO639LanguageAndAudioType(w *bitio.Writer, d *DescriptorISO639LanguageAndAudioType) error { + w.TryWrite(d.Language) + w.TryWriteByte(d.Type) - return b.Err() + return w.TryError } -func calcDescriptorLocalTimeOffsetLength(d *DescriptorLocalTimeOffset) uint8 { - return uint8(13 * len(d.Items)) +func calcDescriptorLocalTimeOffsetLength(d DescriptorLocalTimeOffset) uint8 { + return uint8(13 * len(d)) } -func writeDescriptorLocalTimeOffset(w *astikit.BitsWriter, d *DescriptorLocalTimeOffset) error { - b := astikit.NewBitsWriterBatch(w) - - for _, item := range d.Items { - b.WriteBytesN(item.CountryCode, 3, 0) +func writeDescriptorLocalTimeOffset(w *bitio.Writer, d DescriptorLocalTimeOffset) error { + for _, item := range d { + w.TryWrite(item.CountryCode) - b.WriteN(item.CountryRegionID, 6) - b.WriteN(uint8(0xff), 1) - b.Write(item.LocalTimeOffsetPolarity) + w.TryWriteBits(uint64(item.CountryRegionID), 6) + w.TryWriteBits(0xff, 1) // Reserved. 
+ w.TryWriteBool(item.LocalTimeOffsetPolarity) - if _, err := writeDVBDurationMinutes(w, item.LocalTimeOffset); err != nil { - return err + if err := writeDVBDurationMinutes(w, item.LocalTimeOffset); err != nil { + return fmt.Errorf("writing LocalTimeOffset failed: %w", err) } if _, err := writeDVBTime(w, item.TimeOfChange); err != nil { - return err + return fmt.Errorf("writing TimeOfChange failed: %w", err) } - if _, err := writeDVBDurationMinutes(w, item.NextTimeOffset); err != nil { - return err + if err := writeDVBDurationMinutes(w, item.NextTimeOffset); err != nil { + return fmt.Errorf("writing NextTimeOffset failed: %w", err) } } - return b.Err() + return w.TryError } -func calcDescriptorMaximumBitrateLength(d *DescriptorMaximumBitrate) uint8 { +func calcDescriptorMaximumBitrateLength(d DescriptorMaximumBitrate) uint8 { return 3 } -func writeDescriptorMaximumBitrate(w *astikit.BitsWriter, d *DescriptorMaximumBitrate) error { - b := astikit.NewBitsWriterBatch(w) - - b.WriteN(uint8(0xff), 2) - b.WriteN(uint32(d.Bitrate/50), 22) +func writeDescriptorMaximumBitrate(w *bitio.Writer, d DescriptorMaximumBitrate) error { + w.TryWriteBits(0xff, 2) // Reserved. + w.TryWriteBits(uint64(d.Bitrate), 22) - return b.Err() + return w.TryError } -func calcDescriptorNetworkNameLength(d *DescriptorNetworkName) uint8 { - return uint8(len(d.Name)) +func calcDescriptorNetworkNameLength(name DescriptorNetworkName) uint8 { + return uint8(len(name.Name)) } -func writeDescriptorNetworkName(w *astikit.BitsWriter, d *DescriptorNetworkName) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.Name) - - return b.Err() +func writeDescriptorNetworkName(w *bitio.Writer, d DescriptorNetworkName) error { + _, err := w.Write(d.Name) + return err } -func calcDescriptorParentalRatingLength(d *DescriptorParentalRating) uint8 { +func calcDescriptorParentalRatingLength(d DescriptorParentalRating) uint8 { return uint8(4 * len(d.Items)) } -func writeDescriptorParentalRating(w *astikit.BitsWriter, d *DescriptorParentalRating) error { - b := astikit.NewBitsWriterBatch(w) - +func writeDescriptorParentalRating(w *bitio.Writer, d DescriptorParentalRating) error { for _, item := range d.Items { - b.WriteBytesN(item.CountryCode, 3, 0) - b.Write(item.Rating) + w.TryWrite(item.CountryCode) + w.TryWriteByte(item.Rating) } - - return b.Err() + return w.TryError } -func calcDescriptorPrivateDataIndicatorLength(d *DescriptorPrivateDataIndicator) uint8 { +func calcDescriptorPrivateDataIndicatorLength(d DescriptorPrivateDataIndicator) uint8 { return 4 } -func writeDescriptorPrivateDataIndicator(w *astikit.BitsWriter, d *DescriptorPrivateDataIndicator) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.Indicator) - - return b.Err() +func writeDescriptorPrivateDataIndicator(w *bitio.Writer, d DescriptorPrivateDataIndicator) error { + return w.WriteBits(uint64(d), 32) } -func calcDescriptorPrivateDataSpecifierLength(d *DescriptorPrivateDataSpecifier) uint8 { +func calcDescriptorPrivateDataSpecifierLength(d DescriptorPrivateDataSpecifier) uint8 { return 4 } -func writeDescriptorPrivateDataSpecifier(w *astikit.BitsWriter, d *DescriptorPrivateDataSpecifier) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.Specifier) - - return b.Err() +func writeDescriptorPrivateDataSpecifier(w *bitio.Writer, d DescriptorPrivateDataSpecifier) error { + return w.WriteBits(uint64(d.Specifier), 32) } func calcDescriptorRegistrationLength(d *DescriptorRegistration) uint8 { return uint8(4 + len(d.AdditionalIdentificationInfo)) } -func 
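// The maximum_bitrate body above is 2 reserved bits followed by a 22-bit value,
// i.e. exactly 3 bytes. WriteBits/TryWriteBits emit only the lowest n bits of
// their argument, so TryWriteBits(0xff, 2) comes out as binary 11. A
// self-contained sketch of that packing (not repository code; assumes
// `import "bytes"` and `import "github.com/icza/bitio"`):
func packMaximumBitrateField(bitrate uint32) ([]byte, error) {
	buf := &bytes.Buffer{}
	w := bitio.NewWriter(buf)
	w.TryWriteBits(0xff, 2)             // Reserved bits -> "11".
	w.TryWriteBits(uint64(bitrate), 22) // 22-bit bitrate field.
	if w.TryError != nil {
		return nil, w.TryError
	}
	if err := w.Close(); err != nil { // Flush; 2+22 bits already fill 3 whole bytes.
		return nil, err
	}
	return buf.Bytes(), nil // For bitrate == 1: {0xc0, 0x00, 0x01}.
}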
writeDescriptorRegistration(w *astikit.BitsWriter, d *DescriptorRegistration) error { - b := astikit.NewBitsWriterBatch(w) +func writeDescriptorRegistration(w *bitio.Writer, d *DescriptorRegistration) error { + w.TryWriteBits(uint64(d.FormatIdentifier), 32) + w.TryWrite(d.AdditionalIdentificationInfo) - b.Write(d.FormatIdentifier) - b.Write(d.AdditionalIdentificationInfo) - - return b.Err() + return w.TryError } func calcDescriptorServiceLength(d *DescriptorService) uint8 { @@ -1871,95 +1398,79 @@ func calcDescriptorServiceLength(d *DescriptorService) uint8 { return uint8(ret) } -func writeDescriptorService(w *astikit.BitsWriter, d *DescriptorService) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.Type) - b.Write(uint8(len(d.Provider))) - b.Write(d.Provider) - b.Write(uint8(len(d.Name))) - b.Write(d.Name) +func writeDescriptorService(w *bitio.Writer, d *DescriptorService) error { + w.TryWriteByte(d.Type) + w.TryWriteByte(uint8(len(d.Provider))) + w.TryWrite(d.Provider) + w.TryWriteByte(uint8(len(d.Name))) + w.TryWrite(d.Name) - return b.Err() + return w.TryError } func calcDescriptorShortEventLength(d *DescriptorShortEvent) uint8 { - ret := 3 + 1 + 1 // language code and lengths + ret := 3 + 1 + 1 // Language code and lengths. ret += len(d.EventName) ret += len(d.Text) return uint8(ret) } -func writeDescriptorShortEvent(w *astikit.BitsWriter, d *DescriptorShortEvent) error { - b := astikit.NewBitsWriterBatch(w) - - b.WriteBytesN(d.Language, 3, 0) +func writeDescriptorShortEvent(w *bitio.Writer, d *DescriptorShortEvent) error { + w.TryWrite(d.Language) - b.Write(uint8(len(d.EventName))) - b.Write(d.EventName) + w.TryWriteByte(uint8(len(d.EventName))) + w.TryWrite(d.EventName) - b.Write(uint8(len(d.Text))) - b.Write(d.Text) + w.TryWriteByte(uint8(len(d.Text))) + w.TryWrite(d.Text) - return b.Err() + return w.TryError } -func calcDescriptorStreamIdentifierLength(d *DescriptorStreamIdentifier) uint8 { +func calcDescriptorStreamIdentifierLength(d DescriptorStreamIdentifier) uint8 { return 1 } -func writeDescriptorStreamIdentifier(w *astikit.BitsWriter, d *DescriptorStreamIdentifier) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.ComponentTag) - - return b.Err() +func writeDescriptorStreamIdentifier(w *bitio.Writer, d DescriptorStreamIdentifier) error { + return w.WriteByte(d.ComponentTag) } -func calcDescriptorSubtitlingLength(d *DescriptorSubtitling) uint8 { +func calcDescriptorSubtitlingLength(d DescriptorSubtitling) uint8 { return uint8(8 * len(d.Items)) } -func writeDescriptorSubtitling(w *astikit.BitsWriter, d *DescriptorSubtitling) error { - b := astikit.NewBitsWriterBatch(w) - +func writeDescriptorSubtitling(w *bitio.Writer, d DescriptorSubtitling) error { for _, item := range d.Items { - b.WriteBytesN(item.Language, 3, 0) - b.Write(item.Type) - b.Write(item.CompositionPageID) - b.Write(item.AncillaryPageID) + w.TryWrite(item.Language) + w.TryWriteByte(item.Type) + w.TryWriteBits(uint64(item.CompositionPageID), 16) + w.TryWriteBits(uint64(item.AncillaryPageID), 16) } - - return b.Err() + return w.TryError } -func calcDescriptorTeletextLength(d *DescriptorTeletext) uint8 { +func calcDescriptorTeletextLength(d DescriptorTeletext) uint8 { return uint8(5 * len(d.Items)) } -func writeDescriptorTeletext(w *astikit.BitsWriter, d *DescriptorTeletext) error { - b := astikit.NewBitsWriterBatch(w) - +func writeDescriptorTeletext(w *bitio.Writer, d DescriptorTeletext) error { for _, item := range d.Items { - b.WriteBytesN(item.Language, 3, 0) - b.WriteN(item.Type, 5) - 
b.WriteN(item.Magazine, 3) - b.WriteN(item.Page/10, 4) - b.WriteN(item.Page%10, 4) + w.TryWrite(item.Language) + w.TryWriteBits(uint64(item.Type), 5) + w.TryWriteBits(uint64(item.Magazine), 3) + w.TryWriteBits(uint64(item.Page/10), 4) + w.TryWriteBits(uint64(item.Page%10), 4) } - - return b.Err() + return w.TryError } -func calcDescriptorVBIDataLength(d *DescriptorVBIData) uint8 { - return uint8(3 * len(d.Services)) +func calcDescriptorVBIDataLength(d DescriptorVBIData) uint8 { + return uint8(3 * len(d)) } -func writeDescriptorVBIData(w *astikit.BitsWriter, d *DescriptorVBIData) error { - b := astikit.NewBitsWriterBatch(w) - - for _, item := range d.Services { - b.Write(item.DataServiceID) +func writeDescriptorVBIData(w *bitio.Writer, d DescriptorVBIData) error { + for _, item := range d { + w.TryWriteByte(item.DataServiceID) if item.DataServiceID == VBIDataServiceIDClosedCaptioning || item.DataServiceID == VBIDataServiceIDEBUTeletext || @@ -1967,36 +1478,32 @@ func writeDescriptorVBIData(w *astikit.BitsWriter, d *DescriptorVBIData) error { item.DataServiceID == VBIDataServiceIDMonochrome442Samples || item.DataServiceID == VBIDataServiceIDVPS || item.DataServiceID == VBIDataServiceIDWSS { - - b.Write(uint8(len(item.Descriptors))) // each descriptor is 1 byte + w.TryWriteByte(uint8(len(item.Descriptors))) // Each descriptor is 1 byte. for _, desc := range item.Descriptors { - b.WriteN(uint8(0xff), 2) - b.Write(desc.FieldParity) - b.WriteN(desc.LineOffset, 5) + w.TryWriteBits(0xff, 2) // Reserved. + w.TryWriteBool(desc.FieldParity) + w.TryWriteBits(uint64(desc.LineOffset), 5) } } else { - // let's put one reserved byte - b.Write(uint8(1)) - b.Write(uint8(0xff)) + // Let's put one reserved byte. + w.TryWriteByte(1) + w.TryWriteByte(0xff) } } - return b.Err() + return w.TryError } func calcDescriptorUnknownLength(d *DescriptorUnknown) uint8 { return uint8(len(d.Content)) } -func writeDescriptorUnknown(w *astikit.BitsWriter, d *DescriptorUnknown) error { - b := astikit.NewBitsWriterBatch(w) - - b.Write(d.Content) - - return b.Err() +func writeDescriptorUnknown(w *bitio.Writer, d *DescriptorUnknown) error { + _, err := w.Write(d.Content) + return err } -func calcDescriptorLength(d *Descriptor) uint8 { +func calcDescriptorLength(d *Descriptor) uint8 { //nolint:funlen if d.Tag >= 0x80 && d.Tag <= 0xfe { return calcDescriptorUserDefinedLength(d.UserDefined) } @@ -2054,21 +1561,20 @@ func calcDescriptorLength(d *Descriptor) uint8 { return calcDescriptorUnknownLength(d.Unknown) } -func writeDescriptor(w *astikit.BitsWriter, d *Descriptor) (int, error) { - b := astikit.NewBitsWriterBatch(w) +func writeDescriptor(w *bitio.Writer, d *Descriptor) (int, error) { //nolint:funlen length := calcDescriptorLength(d) - b.Write(d.Tag) - b.Write(length) - - if err := b.Err(); err != nil { - return 0, err + w.TryWriteByte(d.Tag) + w.TryWriteByte(length) + if w.TryError != nil { + return 0, w.TryError } written := int(length) + 2 if d.Tag >= 0x80 && d.Tag <= 0xfe { - return written, writeDescriptorUserDefined(w, d.UserDefined) + _, err := w.Write(d.UserDefined) + return written, err } switch d.Tag { @@ -2126,13 +1632,13 @@ func writeDescriptor(w *astikit.BitsWriter, d *Descriptor) (int, error) { func calcDescriptorsLength(ds []*Descriptor) uint16 { length := uint16(0) for _, d := range ds { - length += 2 // tag and length + length += 2 // Tag and length. 
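// Each teletext item above occupies 5 bytes: a 3-byte language code, then a
// 5-bit type and 3-bit magazine number, then the page number split into two BCD
// nibbles (tens digit, units digit). For example type 1, magazine 2, page 12
// packs as 00001 010 0001 0010 = 0x0a 0x12. Sketch of those last two bytes
// (illustrative, not repository code):
func packTeletextTail(w *bitio.Writer, typ, magazine, page uint8) error {
	w.TryWriteBits(uint64(typ), 5)      // 5-bit type.
	w.TryWriteBits(uint64(magazine), 3) // 3-bit magazine number.
	w.TryWriteBits(uint64(page/10), 4)  // Page tens digit (BCD).
	w.TryWriteBits(uint64(page%10), 4)  // Page units digit (BCD).
	return w.TryError
}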
length += uint16(calcDescriptorLength(d)) } return length } -func writeDescriptors(w *astikit.BitsWriter, ds []*Descriptor) (int, error) { +func writeDescriptors(w *bitio.Writer, ds []*Descriptor) (int, error) { written := 0 for _, d := range ds { @@ -2146,17 +1652,21 @@ func writeDescriptors(w *astikit.BitsWriter, ds []*Descriptor) (int, error) { return written, nil } -func writeDescriptorsWithLength(w *astikit.BitsWriter, ds []*Descriptor) (int, error) { +func writeDescriptorsWithLength(w *bitio.Writer, ds []*Descriptor) (int, error) { length := calcDescriptorsLength(ds) - b := astikit.NewBitsWriterBatch(w) - b.WriteN(uint8(0xff), 4) // reserved - b.WriteN(length, 12) // program_info_length + w.TryWriteBits(0xff, 4) // Reserved. + w.TryWriteBits(uint64(length), 12) // program_info_length. - if err := b.Err(); err != nil { - return 0, err + if w.TryError != nil { + return 0, w.TryError } written, err := writeDescriptors(w, ds) - return written + 2, err // 2 for length + if err != nil { + return 0, fmt.Errorf("writing descriptors failed: %w", err) + } + + written += 2 + return written, nil } diff --git a/descriptor_test.go b/descriptor_test.go index 088aac5..3e0af6c 100644 --- a/descriptor_test.go +++ b/descriptor_test.go @@ -2,46 +2,47 @@ package astits import ( "bytes" - "github.com/asticode/go-astikit" - "github.com/stretchr/testify/assert" "testing" + + "github.com/icza/bitio" + "github.com/stretchr/testify/assert" ) var descriptors = []*Descriptor{{ Length: 0x1, - StreamIdentifier: &DescriptorStreamIdentifier{ComponentTag: 0x7}, + StreamIdentifier: DescriptorStreamIdentifier{ComponentTag: 0x7}, Tag: DescriptorTagStreamIdentifier, }} -func descriptorsBytes(w *astikit.BitsWriter) { - w.Write("000000000011") // Overall length - w.Write(uint8(DescriptorTagStreamIdentifier)) // Tag - w.Write(uint8(1)) // Length - w.Write(uint8(7)) // Component tag +func descriptorsBytes(w *bitio.Writer) { + WriteBinary(w, "000000000011") // Overall length + w.WriteByte(DescriptorTagStreamIdentifier) // Tag + w.WriteByte(1) // Length + w.WriteByte(7) // Component tag } type descriptorTest struct { name string - bytesFunc func(w *astikit.BitsWriter) + bytesFunc func(w *bitio.Writer) desc Descriptor } var descriptorTestTable = []descriptorTest{ { "AC3", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagAC3)) // Tag - w.Write(uint8(9)) // Length - w.Write("1") // Component type flag - w.Write("1") // BSID flag - w.Write("1") // MainID flag - w.Write("1") // ASVC flag - w.Write("1111") // Reserved flags - w.Write(uint8(1)) // Component type - w.Write(uint8(2)) // BSID - w.Write(uint8(3)) // MainID - w.Write(uint8(4)) // ASVC - w.Write([]byte("info")) // Additional info + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagAC3) // Tag + w.WriteByte(9) // Length + WriteBinary(w, "1") // Component type flag + WriteBinary(w, "1") // BSID flag + WriteBinary(w, "1") // MainID flag + WriteBinary(w, "1") // ASVC flag + WriteBinary(w, "1111") // Reserved flags + w.WriteByte(1) // Component type + w.WriteByte(2) // BSID + w.WriteByte(3) // MainID + w.WriteByte(4) // ASVC + w.Write([]byte("info")) // Additional info }, Descriptor{ Tag: DescriptorTagAC3, @@ -56,15 +57,16 @@ var descriptorTestTable = []descriptorTest{ HasComponentType: true, HasMainID: true, MainID: uint8(3), - }}, + }, + }, }, { "ISO639LanguageAndAudioType", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagISO639LanguageAndAudioType)) // Tag - w.Write(uint8(4)) // Length - w.Write([]byte("eng")) // Language - 
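// writeDescriptorsWithLength above frames a descriptor list as 4 reserved bits,
// a 12-bit total length, then each descriptor serialized as tag(8) + length(8)
// + body. A minimal sketch of that 2-byte prefix over an already-serialized
// list (hypothetical helper, not repository code):
func writePrefixedDescriptorBlock(w *bitio.Writer, serialized []byte) (int, error) {
	w.TryWriteBits(0xff, 4)                     // 4 reserved bits.
	w.TryWriteBits(uint64(len(serialized)), 12) // 12-bit descriptors length.
	w.TryWrite(serialized)                      // tag/length/body triples.
	if w.TryError != nil {
		return 0, w.TryError
	}
	return 2 + len(serialized), nil
}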
w.Write(uint8(AudioTypeCleanEffects)) // Audio type + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagISO639LanguageAndAudioType) // Tag + w.WriteByte(4) // Length + w.Write([]byte("eng")) // Language + w.WriteByte(AudioTypeCleanEffects) // Audio type }, Descriptor{ Tag: DescriptorTagISO639LanguageAndAudioType, @@ -72,42 +74,45 @@ var descriptorTestTable = []descriptorTest{ ISO639LanguageAndAudioType: &DescriptorISO639LanguageAndAudioType{ Language: []byte("eng"), Type: AudioTypeCleanEffects, - }}, + }, + }, }, { "MaximumBitrate", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagMaximumBitrate)) // Tag - w.Write(uint8(3)) // Length - w.Write("110000000000000000000001") // Maximum bitrate + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagMaximumBitrate) // Tag + w.WriteByte(3) // Length + WriteBinary(w, "110000000000000000000001") // Maximum bitrate }, Descriptor{ Tag: DescriptorTagMaximumBitrate, Length: 3, - MaximumBitrate: &DescriptorMaximumBitrate{Bitrate: uint32(50)}}, + MaximumBitrate: DescriptorMaximumBitrate{Bitrate: 1}, + }, }, { "NetworkName", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagNetworkName)) // Tag - w.Write(uint8(4)) // Length - w.Write([]byte("name")) // Name + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagNetworkName) // Tag + w.WriteByte(4) // Length + w.Write([]byte("name")) // Name }, Descriptor{ Tag: DescriptorTagNetworkName, Length: 4, - NetworkName: &DescriptorNetworkName{Name: []byte("name")}}, + NetworkName: DescriptorNetworkName{Name: []byte("name")}, + }, }, { "Service", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagService)) // Tag - w.Write(uint8(18)) // Length - w.Write(uint8(ServiceTypeDigitalTelevisionService)) // Type - w.Write(uint8(8)) // Provider name length - w.Write([]byte("provider")) // Provider name - w.Write(uint8(7)) // Service name length - w.Write([]byte("service")) // Service name + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagService) // Tag + w.WriteByte(18) // Length + w.WriteByte(ServiceTypeDigitalTelevisionService) // Type + w.WriteByte(8) // Provider name length + w.Write([]byte("provider")) // Provider name + w.WriteByte(7) // Service name length + w.Write([]byte("service")) // Service name }, Descriptor{ Tag: DescriptorTagService, @@ -116,17 +121,18 @@ var descriptorTestTable = []descriptorTest{ Name: []byte("service"), Provider: []byte("provider"), Type: ServiceTypeDigitalTelevisionService, - }}, + }, + }, }, { "ShortEvent", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagShortEvent)) // Tag - w.Write(uint8(14)) // Length - w.Write([]byte("eng")) // Language code - w.Write(uint8(5)) // Event name length - w.Write([]byte("event")) // Event name - w.Write(uint8(4)) // Text length + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagShortEvent) // Tag + w.WriteByte(14) // Length + w.Write([]byte("eng")) // Language code + w.WriteByte(5) // Event name length + w.Write([]byte("event")) // Event name + w.WriteByte(4) // Text length w.Write([]byte("text")) }, Descriptor{ @@ -136,99 +142,107 @@ var descriptorTestTable = []descriptorTest{ EventName: []byte("event"), Language: []byte("eng"), Text: []byte("text"), - }}, + }, + }, }, { "StreamIdentifier", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagStreamIdentifier)) // Tag - w.Write(uint8(1)) // Length - w.Write(uint8(2)) // Component tag + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagStreamIdentifier) // Tag + w.WriteByte(1) // Length + w.WriteByte(2) // Component tag }, Descriptor{ Tag: 
DescriptorTagStreamIdentifier, Length: 1, - StreamIdentifier: &DescriptorStreamIdentifier{ComponentTag: 0x2}}, + StreamIdentifier: DescriptorStreamIdentifier{ComponentTag: 0x2}, + }, }, { "Subtitling", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagSubtitling)) // Tag - w.Write(uint8(16)) // Length - w.Write([]byte("lg1")) // Item #1 language - w.Write(uint8(1)) // Item #1 type - w.Write(uint16(2)) // Item #1 composition page - w.Write(uint16(3)) // Item #1 ancillary page - w.Write([]byte("lg2")) // Item #2 language - w.Write(uint8(4)) // Item #2 type - w.Write(uint16(5)) // Item #2 composition page - w.Write(uint16(6)) // Item #2 ancillary page + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagSubtitling) // Tag + w.WriteByte(16) // Length + w.Write([]byte("lg1")) // Item #1 language + w.WriteByte(1) // Item #1 type + w.WriteBits(2, 16) // Item #1 composition page + w.WriteBits(3, 16) // Item #1 ancillary page + w.Write([]byte("lg2")) // Item #2 language + w.WriteByte(4) // Item #2 type + w.WriteBits(5, 16) // Item #2 composition page + w.WriteBits(6, 16) // Item #2 ancillary page }, Descriptor{ Tag: DescriptorTagSubtitling, Length: 16, - Subtitling: &DescriptorSubtitling{Items: []*DescriptorSubtitlingItem{ - { - AncillaryPageID: 3, - CompositionPageID: 2, - Language: []byte("lg1"), - Type: 1, - }, - { - AncillaryPageID: 6, - CompositionPageID: 5, - Language: []byte("lg2"), - Type: 4, + Subtitling: DescriptorSubtitling{ + Items: []*DescriptorSubtitlingItem{ + { + AncillaryPageID: 3, + CompositionPageID: 2, + Language: []byte("lg1"), + Type: 1, + }, + { + AncillaryPageID: 6, + CompositionPageID: 5, + Language: []byte("lg2"), + Type: 4, + }, }, - }}}, + }, + }, }, { "Teletext", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagTeletext)) // Tag - w.Write(uint8(10)) // Length - w.Write([]byte("lg1")) // Item #1 language - w.Write("00001") // Item #1 type - w.Write("010") // Item #1 magazine - w.Write("00010010") // Item #1 page number - w.Write([]byte("lg2")) // Item #2 language - w.Write("00011") // Item #2 type - w.Write("100") // Item #2 magazine - w.Write("00100011") // Item #2 page number + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagTeletext) // Tag + w.WriteByte(10) // Length + w.Write([]byte("lg1")) // Item #1 language + WriteBinary(w, "00001") // Item #1 type + WriteBinary(w, "010") // Item #1 magazine + WriteBinary(w, "00010010") // Item #1 page number + w.Write([]byte("lg2")) // Item #2 language + WriteBinary(w, "00011") // Item #2 type + WriteBinary(w, "100") // Item #2 magazine + WriteBinary(w, "00100011") // Item #2 page number }, Descriptor{ Tag: DescriptorTagTeletext, Length: 10, - Teletext: &DescriptorTeletext{Items: []*DescriptorTeletextItem{ - { - Language: []byte("lg1"), - Magazine: uint8(2), - Page: uint8(12), - Type: uint8(1), - }, - { - Language: []byte("lg2"), - Magazine: uint8(4), - Page: uint8(23), - Type: uint8(3), + Teletext: DescriptorTeletext{ + Items: []*DescriptorTeletextItem{ + { + Language: []byte("lg1"), + Magazine: 2, + Page: 12, + Type: 1, + }, + { + Language: []byte("lg2"), + Magazine: 4, + Page: 23, + Type: 3, + }, }, - }}}, + }, + }, }, { "ExtendedEvent", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagExtendedEvent)) // Tag - w.Write(uint8(30)) // Length - w.Write("0001") // Number - w.Write("0010") // Last descriptor number - w.Write([]byte("lan")) // ISO 639 language code - w.Write(uint8(20)) // Length of items - w.Write(uint8(11)) // Item #1 description length - w.Write([]byte("description")) // Item 
#1 description - w.Write(uint8(7)) // Item #1 content length - w.Write([]byte("content")) // Item #1 content - w.Write(uint8(4)) // Text length - w.Write([]byte("text")) // Text + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagExtendedEvent) // Tag + w.WriteByte(30) // Length + WriteBinary(w, "0001") // Number + WriteBinary(w, "0010") // Last descriptor number + w.Write([]byte("lan")) // ISO 639 language code + w.WriteByte(20) // Length of items + w.WriteByte(11) // Item #1 description length + w.Write([]byte("description")) // Item #1 description + w.WriteByte(7) // Item #1 content length + w.Write([]byte("content")) // Item #1 content + w.WriteByte(4) // Text length + w.Write([]byte("text")) // Text }, Descriptor{ Tag: DescriptorTagExtendedEvent, @@ -242,29 +256,30 @@ var descriptorTestTable = []descriptorTest{ LastDescriptorNumber: 0x2, Number: 0x1, Text: []byte("text"), - }}, + }, + }, }, { "EnhancedAC3", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagEnhancedAC3)) // Tag - w.Write(uint8(12)) // Length - w.Write("1") // Component type flag - w.Write("1") // BSID flag - w.Write("1") // MainID flag - w.Write("1") // ASVC flag - w.Write("1") // Mix info exists - w.Write("1") // SubStream1 flag - w.Write("1") // SubStream2 flag - w.Write("1") // SubStream3 flag - w.Write(uint8(1)) // Component type - w.Write(uint8(2)) // BSID - w.Write(uint8(3)) // MainID - w.Write(uint8(4)) // ASVC - w.Write(uint8(5)) // SubStream1 - w.Write(uint8(6)) // SubStream2 - w.Write(uint8(7)) // SubStream3 - w.Write([]byte("info")) // Additional info + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagEnhancedAC3) // Tag + w.WriteByte(12) // Length + WriteBinary(w, "1") // Component type flag + WriteBinary(w, "1") // BSID flag + WriteBinary(w, "1") // MainID flag + WriteBinary(w, "1") // ASVC flag + WriteBinary(w, "1") // Mix info exists + WriteBinary(w, "1") // SubStream1 flag + WriteBinary(w, "1") // SubStream2 flag + WriteBinary(w, "1") // SubStream3 flag + w.WriteByte(1) // Component type + w.WriteByte(2) // BSID + w.WriteByte(3) // MainID + w.WriteByte(4) // ASVC + w.WriteByte(5) // SubStream1 + w.WriteByte(6) // SubStream2 + w.WriteByte(7) // SubStream3 + w.Write([]byte("info")) // Additional info }, Descriptor{ Tag: DescriptorTagEnhancedAC3, @@ -286,20 +301,21 @@ var descriptorTestTable = []descriptorTest{ SubStream1: 5, SubStream2: 6, SubStream3: 7, - }}, + }, + }, }, { "Extension", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagExtension)) // Tag - w.Write(uint8(12)) // Length - w.Write(uint8(DescriptorTagExtensionSupplementaryAudio)) // Extension tag - w.Write("1") // Mix type - w.Write("10101") // Editorial classification - w.Write("1") // Reserved - w.Write("1") // Language code flag - w.Write([]byte("lan")) // Language code - w.Write([]byte("private")) // Private data + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagExtension) // Tag + w.WriteByte(12) // Length + w.WriteByte(DescriptorTagExtensionSupplementaryAudio) // Extension tag + WriteBinary(w, "1") // Mix type + WriteBinary(w, "10101") // Editorial classification + WriteBinary(w, "1") // Reserved + WriteBinary(w, "1") // Language code flag + w.Write([]byte("lan")) // Language code + w.Write([]byte("private")) // Private data }, Descriptor{ Tag: DescriptorTagExtension, @@ -314,19 +330,20 @@ var descriptorTestTable = []descriptorTest{ }, Tag: DescriptorTagExtensionSupplementaryAudio, Unknown: nil, - }}, + }, + }, }, { "Component", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagComponent)) // Tag 
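// In the rewritten tests, literal bit strings go through the WriteBinary helper;
// each call produces the same bits as a WriteBits call of equal width. For
// instance, the two 4-bit fields at the start of the extended_event body above
// could equivalently be written as (illustrative only, not repository code):
func writeExtendedEventNumbers(w *bitio.Writer, number, last uint8) error {
	w.TryWriteBits(uint64(number), 4) // Same bits as WriteBinary(w, "0001") when number == 1.
	w.TryWriteBits(uint64(last), 4)   // Same bits as WriteBinary(w, "0010") when last == 2.
	return w.TryError
}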
- w.Write(uint8(10)) // Length - w.Write("1010") // Stream content ext - w.Write("0101") // Stream content - w.Write(uint8(1)) // Component type - w.Write(uint8(2)) // Component tag - w.Write([]byte("lan")) // ISO639 language code - w.Write([]byte("text")) // Text + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagComponent) // Tag + w.WriteByte(10) // Length + WriteBinary(w, "1010") // Stream content ext + WriteBinary(w, "0101") // Stream content + w.WriteByte(1) // Component type + w.WriteByte(2) // Component tag + w.Write([]byte("lan")) // ISO639 language code + w.Write([]byte("text")) // Text }, Descriptor{ Tag: DescriptorTagComponent, @@ -338,123 +355,136 @@ var descriptorTestTable = []descriptorTest{ StreamContentExt: 10, StreamContent: 5, Text: []byte("text"), - }}, + }, + }, }, { "Content", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagContent)) // Tag - w.Write(uint8(2)) // Length - w.Write("0001") // Item #1 content nibble level 1 - w.Write("0010") // Item #1 content nibble level 2 - w.Write(uint8(3)) // Item #1 user byte + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagContent) // Tag + w.WriteByte(2) // Length + WriteBinary(w, "0001") // Item #1 content nibble level 1 + WriteBinary(w, "0010") // Item #1 content nibble level 2 + w.WriteByte(3) // Item #1 user byte }, Descriptor{ Tag: DescriptorTagContent, Length: 2, - Content: &DescriptorContent{Items: []*DescriptorContentItem{{ - ContentNibbleLevel1: 1, - ContentNibbleLevel2: 2, - UserByte: 3, - }}}}, + Content: DescriptorContent{ + Items: []*DescriptorContentItem{{ + ContentNibbleLevel1: 1, + ContentNibbleLevel2: 2, + UserByte: 3, + }}, + }, + }, }, { "ParentalRating", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagParentalRating)) // Tag - w.Write(uint8(4)) // Length - w.Write([]byte("cou")) // Item #1 country code - w.Write(uint8(2)) // Item #1 rating + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagParentalRating) // Tag + w.WriteByte(4) // Length + w.Write([]byte("cou")) // Item #1 country code + w.WriteByte(2) // Item #1 rating }, Descriptor{ Tag: DescriptorTagParentalRating, Length: 4, - ParentalRating: &DescriptorParentalRating{Items: []*DescriptorParentalRatingItem{{ - CountryCode: []byte("cou"), - Rating: 2, - }}}}, + ParentalRating: DescriptorParentalRating{ + Items: []*DescriptorParentalRatingItem{{ + CountryCode: []byte("cou"), + Rating: 2, + }}, + }, + }, }, { "LocalTimeOffset", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagLocalTimeOffset)) // Tag - w.Write(uint8(13)) // Length - w.Write([]byte("cou")) // Country code - w.Write("101010") // Country region ID - w.Write("1") // Reserved - w.Write("1") // Local time offset polarity - w.Write(dvbDurationMinutesBytes) // Local time offset - w.Write(dvbTimeBytes) // Time of change - w.Write(dvbDurationMinutesBytes) // Next time offset + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagLocalTimeOffset) // Tag + w.WriteByte(13) // Length + w.Write([]byte("cou")) // Country code + WriteBinary(w, "101010") // Country region ID + WriteBinary(w, "1") // Reserved + WriteBinary(w, "1") // Local time offset polarity + w.Write(dvbDurationMinutesBytes) // Local time offset + w.Write(dvbTimeBytes) // Time of change + w.Write(dvbDurationMinutesBytes) // Next time offset }, Descriptor{ Tag: DescriptorTagLocalTimeOffset, Length: 13, - LocalTimeOffset: &DescriptorLocalTimeOffset{Items: []*DescriptorLocalTimeOffsetItem{{ + LocalTimeOffset: []*DescriptorLocalTimeOffsetItem{{ CountryCode: []byte("cou"), CountryRegionID: 42, 
LocalTimeOffset: dvbDurationMinutes, LocalTimeOffsetPolarity: true, NextTimeOffset: dvbDurationMinutes, TimeOfChange: dvbTime, - }}}}, + }}, + }, }, { "VBIData", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagVBIData)) // Tag - w.Write(uint8(3)) // Length - w.Write(uint8(VBIDataServiceIDEBUTeletext)) // Service #1 id - w.Write(uint8(1)) // Service #1 descriptor length - w.Write("11") // Service #1 descriptor reserved - w.Write("1") // Service #1 descriptor field polarity - w.Write("10101") // Service #1 descriptor line offset + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagVBIData) // Tag + w.WriteByte(3) // Length + w.WriteByte(VBIDataServiceIDEBUTeletext) // Service #1 id + w.WriteByte(1) // Service #1 descriptor length + WriteBinary(w, "11") // Service #1 descriptor reserved + WriteBinary(w, "1") // Service #1 descriptor field polarity + WriteBinary(w, "10101") // Service #1 descriptor line offset }, Descriptor{ Tag: DescriptorTagVBIData, Length: 3, - VBIData: &DescriptorVBIData{Services: []*DescriptorVBIDataService{{ + VBIData: []*DescriptorVBIDataService{{ DataServiceID: VBIDataServiceIDEBUTeletext, Descriptors: []*DescriptorVBIDataDescriptor{{ FieldParity: true, LineOffset: 21, }}, - }}}}, + }}, + }, }, { "VBITeletext", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagVBITeletext)) // Tag - w.Write(uint8(5)) // Length - w.Write([]byte("lan")) // Item #1 language - w.Write("00001") // Item #1 type - w.Write("010") // Item #1 magazine - w.Write("00010010") // Item #1 page number + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagVBITeletext) // Tag + w.WriteByte(5) // Length + w.Write([]byte("lan")) // Item #1 language + WriteBinary(w, "00001") // Item #1 type + WriteBinary(w, "010") // Item #1 magazine + WriteBinary(w, "00010010") // Item #1 page number }, Descriptor{ Tag: DescriptorTagVBITeletext, Length: 5, - VBITeletext: &DescriptorTeletext{Items: []*DescriptorTeletextItem{{ - Language: []byte("lan"), - Magazine: uint8(2), - Page: uint8(12), - Type: uint8(1), - }}}}, + VBITeletext: DescriptorTeletext{ + []*DescriptorTeletextItem{{ + // Language: 7102830, // "lan" + Language: []byte("lan"), + Magazine: 2, + Page: 12, + Type: 1, + }}, + }, + }, }, { "AVCVideo", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagAVCVideo)) // Tag - w.Write(uint8(4)) // Length - w.Write(uint8(1)) // Profile idc - w.Write("1") // Constraint set0 flag - w.Write("1") // Constraint set1 flag - w.Write("1") // Constraint set1 flag - w.Write("10101") // Compatible flags - w.Write(uint8(2)) // Level idc - w.Write("1") // AVC still present - w.Write("1") // AVC 24 hour picture flag - w.Write("111111") // Reserved + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagAVCVideo) // Tag + w.WriteByte(4) // Length + w.WriteByte(1) // Profile idc + WriteBinary(w, "1") // Constraint set0 flag + WriteBinary(w, "1") // Constraint set1 flag + WriteBinary(w, "1") // Constraint set1 flag + WriteBinary(w, "10101") // Compatible flags + w.WriteByte(2) // Level idc + WriteBinary(w, "1") // AVC still present + WriteBinary(w, "1") // AVC 24 hour picture flag + WriteBinary(w, "111111") // Reserved }, Descriptor{ Tag: DescriptorTagAVCVideo, @@ -468,83 +498,85 @@ var descriptorTestTable = []descriptorTest{ ConstraintSet2Flag: true, LevelIDC: 2, ProfileIDC: 1, - }}, + }, + }, }, { "PrivateDataSpecifier", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagPrivateDataSpecifier)) // Tag - w.Write(uint8(4)) // Length - w.Write(uint32(128)) // Private data specifier + func(w 
*bitio.Writer) { + w.WriteByte(DescriptorTagPrivateDataSpecifier) // Tag + w.WriteByte(4) // Length + w.WriteBits(128, 32) // Private data specifier }, Descriptor{ - Tag: DescriptorTagPrivateDataSpecifier, - Length: 4, - PrivateDataSpecifier: &DescriptorPrivateDataSpecifier{ - Specifier: 128, - }}, + Tag: DescriptorTagPrivateDataSpecifier, + Length: 4, + PrivateDataSpecifier: DescriptorPrivateDataSpecifier{Specifier: 128}, + }, }, { "DataStreamAlignment", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagDataStreamAlignment)) // Tag - w.Write(uint8(1)) // Length - w.Write(uint8(2)) // Type + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagDataStreamAlignment) // Tag + w.WriteByte(1) // Length + w.WriteByte(2) // Type }, Descriptor{ Tag: DescriptorTagDataStreamAlignment, Length: 1, - DataStreamAlignment: &DescriptorDataStreamAlignment{ - Type: 2, - }}, + + DataStreamAlignment: 2, + }, }, { "PrivateDataIndicator", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagPrivateDataIndicator)) // Tag - w.Write(uint8(4)) // Length - w.Write(uint32(127)) // Private data indicator + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagPrivateDataIndicator) // Tag + w.WriteByte(4) // Length + w.WriteBits(127, 32) // Private data indicator }, Descriptor{ Tag: DescriptorTagPrivateDataIndicator, Length: 4, - PrivateDataIndicator: &DescriptorPrivateDataIndicator{ - Indicator: 127, - }}, + + PrivateDataIndicator: 127, + }, }, { "UserDefined", - func(w *astikit.BitsWriter) { - w.Write(uint8(0x80)) // Tag - w.Write(uint8(4)) // Length + func(w *bitio.Writer) { + w.WriteByte(0x80) // Tag + w.WriteByte(4) // Length w.Write([]byte("test")) // User defined }, Descriptor{ Tag: 0x80, Length: 4, - UserDefined: []byte("test")}, + UserDefined: []byte("test"), + }, }, { "Registration", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagRegistration)) // Tag - w.Write(uint8(8)) // Length - w.Write(uint32(1)) // Format identifier - w.Write([]byte("test")) // Additional identification info + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagRegistration) // Tag + w.WriteByte(8) // Length + w.WriteBits(1, 32) // Format identifier + w.Write([]byte("test")) // Additional identification info }, Descriptor{ Tag: DescriptorTagRegistration, Length: 8, Registration: &DescriptorRegistration{ AdditionalIdentificationInfo: []byte("test"), - FormatIdentifier: uint32(1), - }}, + FormatIdentifier: 1, + }, + }, }, { "Unknown", - func(w *astikit.BitsWriter) { - w.Write(uint8(0x1)) // Tag - w.Write(uint8(4)) // Length + func(w *bitio.Writer) { + w.WriteByte(0x1) // Tag + w.WriteByte(4) // Length w.Write([]byte("test")) // Content }, Descriptor{ @@ -553,15 +585,16 @@ var descriptorTestTable = []descriptorTest{ Unknown: &DescriptorUnknown{ Content: []byte("test"), Tag: 0x1, - }}, + }, + }, }, { "Extension", - func(w *astikit.BitsWriter) { - w.Write(uint8(DescriptorTagExtension)) // Tag - w.Write(uint8(5)) // Length - w.Write(uint8(0)) // Extension tag - w.Write([]byte("test")) // Content + func(w *bitio.Writer) { + w.WriteByte(DescriptorTagExtension) // Tag + w.WriteByte(5) // Length + w.WriteByte(0) // Extension tag + w.Write([]byte("test")) // Content }, Descriptor{ Tag: DescriptorTagExtension, @@ -569,7 +602,8 @@ var descriptorTestTable = []descriptorTest{ Extension: &DescriptorExtension{ Tag: 0, Unknown: &[]byte{'t', 'e', 's', 't'}, - }}, + }, + }, }, } @@ -582,14 +616,18 @@ func TestParseDescriptorOneByOne(t *testing.T) { // 3. 
compare expected descriptor value and actual buf := bytes.Buffer{} buf.Write([]byte{0x00, 0x00}) // reserve two bytes for length - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) tc.bytesFunc(w) descLen := uint16(buf.Len() - 2) descBytes := buf.Bytes() descBytes[0] = byte(descLen >> 8) descBytes[1] = byte(descLen & 0xff) - ds, err := parseDescriptors(astikit.NewBytesIterator(descBytes)) + r := bitio.NewCountReader(bytes.NewReader(descBytes)) + _, err := r.ReadBits(4) + assert.NoError(t, err) + + ds, err := parseDescriptors(r) assert.NoError(t, err) assert.Equal(t, tc.desc, *ds[0]) }) @@ -599,7 +637,7 @@ func TestParseDescriptorOneByOne(t *testing.T) { func TestParseDescriptorAll(t *testing.T) { buf := bytes.Buffer{} buf.Write([]byte{0x00, 0x00}) // reserve two bytes for length - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) for _, tc := range descriptorTestTable { tc.bytesFunc(w) @@ -610,7 +648,11 @@ func TestParseDescriptorAll(t *testing.T) { descBytes[0] = byte(descLen >> 8) descBytes[1] = byte(descLen & 0xff) - ds, err := parseDescriptors(astikit.NewBytesIterator(descBytes)) + r := bitio.NewCountReader(bytes.NewReader(descBytes)) + _, err := r.ReadBits(4) + assert.NoError(t, err) + + ds, err := parseDescriptors(r) assert.NoError(t, err) for i, tc := range descriptorTestTable { @@ -622,11 +664,11 @@ func TestWriteDescriptorOneByOne(t *testing.T) { for _, tc := range descriptorTestTable { t.Run(tc.name, func(t *testing.T) { bufExpected := bytes.Buffer{} - wExpected := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufExpected}) + wExpected := bitio.NewWriter(&bufExpected) tc.bytesFunc(wExpected) bufActual := bytes.Buffer{} - wActual := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufActual}) + wActual := bitio.NewWriter(&bufActual) n, err := writeDescriptor(wActual, &tc.desc) assert.NoError(t, err) assert.Equal(t, n, bufActual.Len()) @@ -638,7 +680,7 @@ func TestWriteDescriptorOneByOne(t *testing.T) { func TestWriteDescriptorAll(t *testing.T) { bufExpected := bytes.Buffer{} bufExpected.Write([]byte{0x00, 0x00}) // reserve two bytes for length - wExpected := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufExpected}) + wExpected := bitio.NewWriter(&bufExpected) dss := []*Descriptor{} @@ -654,7 +696,7 @@ func TestWriteDescriptorAll(t *testing.T) { descBytes[1] = byte(descLen & 0xff) bufActual := bytes.Buffer{} - wActual := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &bufActual}) + wActual := bitio.NewWriter(&bufActual) n, err := writeDescriptorsWithLength(wActual, dss) assert.NoError(t, err) @@ -666,7 +708,7 @@ func TestWriteDescriptorAll(t *testing.T) { func BenchmarkWriteDescriptor(b *testing.B) { buf := bytes.Buffer{} buf.Grow(1024) - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) for _, bm := range descriptorTestTable { b.Run(bm.name, func(b *testing.B) { @@ -685,7 +727,7 @@ func BenchmarkParseDescriptor(b *testing.B) { for ti, tc := range descriptorTestTable { buf := bytes.Buffer{} buf.Write([]byte{0x00, 0x00}) // reserve two bytes for length - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) + w := bitio.NewWriter(&buf) tc.bytesFunc(w) descLen := uint16(buf.Len() - 2) descBytes := buf.Bytes() @@ -698,7 +740,11 @@ func BenchmarkParseDescriptor(b *testing.B) { b.Run(tc.name, func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - 
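// On the parse side, the tests wrap the serialized block in a bitio.CountReader
// and consume the 4 reserved bits first, so parseDescriptors starts directly at
// the 12-bit length field; the reader offers the same try-pattern as the writer.
// Sketch (hypothetical helper, not repository code; assumes `import "bytes"`
// and `import "github.com/icza/bitio"`):
func readDescriptorBlockLength(block []byte) (uint16, error) {
	r := bitio.NewCountReader(bytes.NewReader(block))
	_ = r.TryReadBits(4)                // Skip the 4 reserved bits.
	length := uint16(r.TryReadBits(12)) // 12-bit descriptors length.
	return length, r.TryError
}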
parseDescriptors(astikit.NewBytesIterator(bss[ti])) + r := bitio.NewCountReader(bytes.NewReader(bss[ti])) + + _, err := r.ReadBits(4) + assert.NoError(b, err) + parseDescriptors(r) } }) } diff --git a/dvb.go b/dvb.go index 711a984..2d0a635 100644 --- a/dvb.go +++ b/dvb.go @@ -2,79 +2,73 @@ package astits import ( "fmt" + "strconv" "time" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) // parseDVBTime parses a DVB time -// This field is coded as 16 bits giving the 16 LSBs of MJD followed by 24 bits coded as 6 digits in 4 - bit Binary -// Coded Decimal (BCD). If the start time is undefined (e.g. for an event in a NVOD reference service) all bits of the -// field are set to "1". -// I apologize for the computation which is really messy but details are given in the documentation -// Page: 160 | Annex C | Link: https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf -// (barbashov) the link above can be broken, alternative: https://dvb.org/wp-content/uploads/2019/12/a038_tm1217r37_en300468v1_17_1_-_rev-134_-_si_specification.pdf -func parseDVBTime(i *astikit.BytesIterator) (t time.Time, err error) { - // Get next 2 bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - +// This field is coded as 16 bits giving the 16 LSBs of MJD +// followed by 24 bits coded as 6 digits in 4 - bit Binary +// Coded Decimal (BCD). If the start time is undefined +// (e.g. for an event in a NVOD reference service) +// all bits of the field are set to "1". +// +// Page: 160 | Annex C | Link: +// https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf +func parseDVBTime(r *bitio.CountReader) (time.Time, error) { // Date - var mjd = uint16(bs[0])<<8 | uint16(bs[1]) - var yt = int((float64(mjd) - 15078.2) / 365.25) - var mt = int((float64(mjd) - 14956.1 - float64(int(float64(yt)*365.25))) / 30.6001) - var d = int(float64(mjd) - 14956 - float64(int(float64(yt)*365.25)) - float64(int(float64(mt)*30.6001))) + mjd := uint16(r.TryReadBits(16)) + yt := int((float32(mjd) - 15078.2) / 365.25) + mt := int((float64(mjd) - 14956.1 - float64(uint16(float64(yt)*365.25))) / 30.6001) + d := int(mjd - 14956 - uint16(float64(yt)*365.25) - uint16(float64(mt)*30.6001)) var k int if mt == 14 || mt == 15 { k = 1 } - var y = yt + k - var m = mt - 1 - k*12 - t, _ = time.Parse("06-01-02", fmt.Sprintf("%d-%d-%d", y, m, d)) - - // Time - var s time.Duration - if s, err = parseDVBDurationSeconds(i); err != nil { - err = fmt.Errorf("astits: parsing DVB duration seconds failed: %w", err) - return + y := yt + k + m := mt - 1 - k*12 + + dateStr := strconv.Itoa(y) + "-" + strconv.Itoa(m) + "-" + strconv.Itoa(d) + t, _ := time.Parse("06-01-02", dateStr) + + s, err := parseDVBDurationSeconds(r) + if err != nil { + return time.Time{}, fmt.Errorf("parsing DVB duration seconds failed: %w", err) } + t = t.Add(s) - return + return t, r.TryError } -// parseDVBDurationMinutes parses a minutes duration -// 16 bit field containing the duration of the event in hours, minutes. format: 4 digits, 4 - bit BCD = 18 bit -func parseDVBDurationMinutes(i *astikit.BytesIterator) (d time.Duration, err error) { - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - d = parseDVBDurationByte(bs[0])*time.Hour + parseDVBDurationByte(bs[1])*time.Minute - return +// parseDVBDurationMinutes parses a minutes duration. 
+// 16 bit field containing the duration of the event in +// hours, minutes. format: 4 digits, 4 - bit BCD = 18 bit. +func parseDVBDurationMinutes(r *bitio.CountReader) (time.Duration, error) { + d := parseDVBDurationByte(r.TryReadByte())*time.Hour + //nolint:durationcheck + parseDVBDurationByte(r.TryReadByte())*time.Minute //nolint:durationcheck + + return d, r.TryError } -// parseDVBDurationSeconds parses a seconds duration -// 24 bit field containing the duration of the event in hours, minutes, seconds. format: 6 digits, 4 - bit BCD = 24 bit -func parseDVBDurationSeconds(i *astikit.BytesIterator) (d time.Duration, err error) { - var bs []byte - if bs, err = i.NextBytesNoCopy(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - d = parseDVBDurationByte(bs[0])*time.Hour + parseDVBDurationByte(bs[1])*time.Minute + parseDVBDurationByte(bs[2])*time.Second - return +// parseDVBDurationSeconds parses a seconds duration. +// 24 bit field containing the duration of the event in hours, +// minutes, seconds. format: 6 digits, 4 - bit BCD = 24 bit. +func parseDVBDurationSeconds(r *bitio.CountReader) (time.Duration, error) { + d := parseDVBDurationByte(r.TryReadByte())*time.Hour + //nolint:durationcheck + parseDVBDurationByte(r.TryReadByte())*time.Minute + //nolint:durationcheck + parseDVBDurationByte(r.TryReadByte())*time.Second //nolint:durationcheck + + return d, r.TryError } -// parseDVBDurationByte parses a duration byte +// parseDVBDurationByte parses a duration byte. func parseDVBDurationByte(i byte) time.Duration { - return time.Duration(uint8(i)>>4*10 + uint8(i)&0xf) + return time.Duration(i>>4*10 + i&0xf) } -func writeDVBTime(w *astikit.BitsWriter, t time.Time) (int, error) { +func writeDVBTime(w *bitio.Writer, t time.Time) (int, error) { year := t.Year() - 1900 month := t.Month() day := t.Day() @@ -88,41 +82,37 @@ func writeDVBTime(w *astikit.BitsWriter, t time.Time) (int, error) { d := t.Sub(t.Truncate(24 * time.Hour)) - b := astikit.NewBitsWriterBatch(w) - - b.Write(uint16(mjd)) + if err := w.WriteBits(uint64(mjd), 16); err != nil { + return 0, err + } bytesWritten, err := writeDVBDurationSeconds(w, d) if err != nil { return 2, err } - return bytesWritten + 2, b.Err() + return bytesWritten + 2, nil } -func writeDVBDurationMinutes(w *astikit.BitsWriter, d time.Duration) (int, error) { - b := astikit.NewBitsWriterBatch(w) - +func writeDVBDurationMinutes(w *bitio.Writer, d time.Duration) error { hours := uint8(d.Hours()) minutes := uint8(int(d.Minutes()) % 60) - b.Write(dvbDurationByteRepresentation(hours)) - b.Write(dvbDurationByteRepresentation(minutes)) + w.TryWriteByte(dvbDurationByteRepresentation(hours)) + w.TryWriteByte(dvbDurationByteRepresentation(minutes)) - return 2, b.Err() + return w.TryError } -func writeDVBDurationSeconds(w *astikit.BitsWriter, d time.Duration) (int, error) { - b := astikit.NewBitsWriterBatch(w) - +func writeDVBDurationSeconds(w *bitio.Writer, d time.Duration) (int, error) { hours := uint8(d.Hours()) minutes := uint8(int(d.Minutes()) % 60) seconds := uint8(int(d.Seconds()) % 60) - b.Write(dvbDurationByteRepresentation(hours)) - b.Write(dvbDurationByteRepresentation(minutes)) - b.Write(dvbDurationByteRepresentation(seconds)) + w.TryWriteByte(dvbDurationByteRepresentation(hours)) + w.TryWriteByte(dvbDurationByteRepresentation(minutes)) + w.TryWriteByte(dvbDurationByteRepresentation(seconds)) - return 3, b.Err() + return 3, w.TryError } func dvbDurationByteRepresentation(n uint8) uint8 { diff --git a/dvb_test.go 
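// Worked example for the MJD + BCD layout parsed and written above: the five
// bytes 0xc0 0x79 0x12 0x45 0x00 decode as
//
//	MJD = 0xc079 = 49273
//	Y'  = int((49273 - 15078.2) / 365.25)                   = 93
//	M'  = int((49273 - 14956.1 - int(93*365.25)) / 30.6001) = 11
//	D   = 49273 - 14956 - int(93*365.25) - int(11*30.6001)  = 13
//	K   = 0  =>  Y = 93, M = 11 - 1 = 10  ->  1993-10-13
//
// while the three BCD bytes 0x12 0x45 0x00 read as 12:45:00. Each BCD byte
// converts both ways with plain nibble arithmetic; a tiny sketch (helper names
// are illustrative, the repository uses parseDVBDurationByte and
// dvbDurationByteRepresentation for this):
func bcdByteToInt(b byte) int   { return int(b>>4)*10 + int(b&0xf) } // 0x45 -> 45
func intToBCDByte(n uint8) byte { return n/10<<4 | n%10 }            // 45 -> 0x45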
b/dvb_test.go index da09c49..6c47ce6 100644 --- a/dvb_test.go +++ b/dvb_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) @@ -19,26 +19,29 @@ var ( ) func TestParseDVBTime(t *testing.T) { - d, err := parseDVBTime(astikit.NewBytesIterator(dvbTimeBytes)) + r := bitio.NewCountReader(bytes.NewReader(dvbTimeBytes)) + d, err := parseDVBTime(r) assert.Equal(t, dvbTime, d) assert.NoError(t, err) } func TestParseDVBDurationMinutes(t *testing.T) { - d, err := parseDVBDurationMinutes(astikit.NewBytesIterator(dvbDurationMinutesBytes)) + r := bitio.NewCountReader(bytes.NewReader(dvbDurationMinutesBytes)) + d, err := parseDVBDurationMinutes(r) assert.Equal(t, dvbDurationMinutes, d) assert.NoError(t, err) } func TestParseDVBDurationSeconds(t *testing.T) { - d, err := parseDVBDurationSeconds(astikit.NewBytesIterator(dvbDurationSecondsBytes)) + r := bitio.NewCountReader(bytes.NewReader(dvbDurationSecondsBytes)) + d, err := parseDVBDurationSeconds(r) assert.Equal(t, dvbDurationSeconds, d) assert.NoError(t, err) } func TestWriteDVBTime(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) n, err := writeDVBTime(w, dvbTime) assert.NoError(t, err) assert.Equal(t, n, buf.Len()) @@ -47,16 +50,15 @@ func TestWriteDVBTime(t *testing.T) { func TestWriteDVBDurationMinutes(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - n, err := writeDVBDurationMinutes(w, dvbDurationMinutes) + w := bitio.NewWriter(buf) + err := writeDVBDurationMinutes(w, dvbDurationMinutes) assert.NoError(t, err) - assert.Equal(t, n, buf.Len()) assert.Equal(t, dvbDurationMinutesBytes, buf.Bytes()) } func TestWriteDVBDurationSeconds(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) n, err := writeDVBDurationSeconds(w, dvbDurationSeconds) assert.NoError(t, err) assert.Equal(t, n, buf.Len()) diff --git a/go.mod b/go.mod index b1a9888..e54da86 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.13 require ( github.com/asticode/go-astikit v0.20.0 + github.com/icza/bitio v1.1.0 github.com/pkg/profile v1.4.0 - github.com/stretchr/testify v1.4.0 + github.com/stretchr/testify v1.7.1 ) diff --git a/go.sum b/go.sum index 72723ee..7868e97 100644 --- a/go.sum +++ b/go.sum @@ -2,14 +2,18 @@ github.com/asticode/go-astikit v0.20.0 h1:+7N+J4E4lWx2QOkRdOf6DafWJMv6O4RRfgClwQ github.com/asticode/go-astikit v0.20.0/go.mod h1:h4ly7idim1tNhaVkdVBeXQZEE3L0xblP7fCWbgwipF0= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/icza/bitio v1.1.0 h1:ysX4vtldjdi3Ygai5m1cWy4oLkhWTAi+SyO6HC8L9T0= +github.com/icza/bitio v1.1.0/go.mod h1:0jGnlLAx8MKMr9VGnn/4YrvZiprkvBelsVIbA9Jjr9A= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6 h1:8UsGZ2rr2ksmEru6lToqnXgA8Mz1DP11X4zSJ159C3k= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6/go.mod h1:xQig96I1VNBDIWGCdTt54nHt6EeI639SmHycLYL7FkA= github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI= github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/muxer.go b/muxer.go index 43ace2a..34435f5 100644 --- a/muxer.go +++ b/muxer.go @@ -4,32 +4,32 @@ import ( "bytes" "context" "errors" - "io" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) const ( - startPID uint16 = 0x0100 pmtStartPID uint16 = 0x1000 programNumberStart uint16 = 1 ) +// Errors. var ( - ErrPIDNotFound = errors.New("astits: PID not found") - ErrPIDAlreadyExists = errors.New("astits: PID already exists") - ErrPCRPIDInvalid = errors.New("astits: PCR PID invalid") + ErrPIDMissing = errors.New("PID missing") + ErrPIDAlreadyExists = errors.New("PID already exists") + ErrPCRPIDInvalid = errors.New("PCR PID invalid") ) +// Muxer . type Muxer struct { ctx context.Context - w io.Writer - bitsWriter *astikit.BitsWriter + w WriterAndByteWriter + bitsWriter *bitio.Writer packetSize int - tablesRetransmitPeriod int // period in PES packets + tablesRetransmitPeriod int // period in PES packets. - pm *programMap // pid -> programNumber + pm *programMap // pid -> programNumber. pmUpdated bool pmt PMTData pmtUpdated bool @@ -43,7 +43,7 @@ type Muxer struct { pmtBytes bytes.Buffer buf bytes.Buffer - bufWriter *astikit.BitsWriter + bufWriter *bitio.Writer esContexts map[uint16]*esContext tablesRetransmitCounter int @@ -57,24 +57,28 @@ type esContext struct { func newEsContext(es *PMTElementaryStream) *esContext { return &esContext{ es: es, - cc: newWrappingCounter(0b1111), // CC is 4 bits + cc: newWrappingCounter(0b1111), // CC is 4 bits. } } +// MuxerOptTablesRetransmitPeriod . func MuxerOptTablesRetransmitPeriod(newPeriod int) func(*Muxer) { return func(m *Muxer) { m.tablesRetransmitPeriod = newPeriod } } -// TODO MuxerOptAutodetectPCRPID selecting first video PID for each PMT, falling back to first audio, falling back to any other +// TODO MuxerOptAutodetectPCRPID selecting +// first video PID for each PMT, falling back +// to first audio, falling back to any other. -func NewMuxer(ctx context.Context, w io.Writer, opts ...func(*Muxer)) *Muxer { +// NewMuxer . +func NewMuxer(ctx context.Context, w WriterAndByteWriter, opts ...func(*Muxer)) *Muxer { m := &Muxer{ ctx: ctx, w: w, - packetSize: MpegTsPacketSize, // no 192-byte packet support yet + packetSize: MpegTsPacketSize, // no 192-byte packet support yet. 
tablesRetransmitPeriod: 40, pm: newProgramMap(), @@ -83,7 +87,7 @@ func NewMuxer(ctx context.Context, w io.Writer, opts ...func(*Muxer)) *Muxer { ProgramNumber: programNumberStart, }, - // table version is 5-bit field + // table version is 5-bit field. patVersion: newWrappingCounter(0b11111), pmtVersion: newWrappingCounter(0b11111), @@ -93,10 +97,10 @@ func NewMuxer(ctx context.Context, w io.Writer, opts ...func(*Muxer)) *Muxer { esContexts: map[uint16]*esContext{}, } - m.bufWriter = astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &m.buf}) - m.bitsWriter = astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: m.w}) + m.bufWriter = bitio.NewWriter(&m.buf) + m.bitsWriter = bitio.NewWriter(m.w) - // TODO multiple programs support + // TODO multiple programs support. m.pm.set(pmtStartPID, programNumberStart) m.pmUpdated = true @@ -104,13 +108,14 @@ func NewMuxer(ctx context.Context, w io.Writer, opts ...func(*Muxer)) *Muxer { opt(m) } - // to output tables at the very start + // to output tables at the very start. m.tablesRetransmitCounter = m.tablesRetransmitPeriod return m } -// if es.ElementaryPID is zero, it will be generated automatically +// AddElementaryStream if es.ElementaryPID is zero, +// it will be generated automatically. func (m *Muxer) AddElementaryStream(es PMTElementaryStream) error { if es.ElementaryPID != 0 { for _, oes := range m.pmt.ElementaryStreams { @@ -126,12 +131,13 @@ func (m *Muxer) AddElementaryStream(es PMTElementaryStream) error { m.pmt.ElementaryStreams = append(m.pmt.ElementaryStreams, &es) m.esContexts[es.ElementaryPID] = newEsContext(&es) - // invalidate pmt cache + // Invalidate pmt cache. m.pmtBytes.Reset() m.pmtUpdated = true return nil } +// RemoveElementaryStream . func (m *Muxer) RemoveElementaryStream(pid uint16) error { foundIdx := -1 for i, oes := range m.pmt.ElementaryStreams { @@ -142,7 +148,7 @@ func (m *Muxer) RemoveElementaryStream(pid uint16) error { } if foundIdx == -1 { - return ErrPIDNotFound + return ErrPIDMissing } m.pmt.ElementaryStreams = append(m.pmt.ElementaryStreams[:foundIdx], m.pmt.ElementaryStreams[foundIdx+1:]...) @@ -152,19 +158,19 @@ func (m *Muxer) RemoveElementaryStream(pid uint16) error { return nil } -// SetPCRPID marks pid as one to look PCRs in +// SetPCRPID marks pid as one to look PCRs in. func (m *Muxer) SetPCRPID(pid uint16) { m.pmt.PCRPID = pid m.pmtUpdated = true } -// WriteData writes MuxerData to TS stream -// Currently only PES packets are supported -// Be aware that after successful call WriteData will set d.AdaptationField.StuffingLength value to zero -func (m *Muxer) WriteData(d *MuxerData) (int, error) { +// WriteData writes MuxerData to TS stream. Currently only +// PES packets are supported. Be aware that after successful call +// WriteData will set d.AdaptationField.StuffingLength value to zero. +func (m *Muxer) WriteData(d *MuxerData) (int, error) { //nolint:funlen,gocognit ctx, ok := m.esContexts[d.PID] if !ok { - return 0, ErrPIDNotFound + return 0, ErrPIDMissing } bytesWritten := 0 @@ -182,9 +188,9 @@ func (m *Muxer) WriteData(d *MuxerData) (int, error) { payloadStart := true writeAf := d.AdaptationField != nil - payloadBytesWritten := 0 + var payloadBytesWritten int for payloadBytesWritten < len(d.PES.Data) { - pktLen := 1 + mpegTsPacketHeaderSize // sync byte + header + pktLen := 1 + mpegTsPacketHeaderSize // sync byte + header. 
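// The continuity counters above wrap at 4 bits (0..15) and the PSI version
// counters at 5 bits (0..31). newWrappingCounter itself is not shown in this
// diff; the behaviour relied on is roughly the following (illustrative sketch
// only, the real implementation may differ):
type wrappingCounterSketch struct{ value, max int }

func (c *wrappingCounterSketch) inc() int {
	c.value++
	if c.value > c.max {
		c.value = 0
	}
	return c.value
}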
pkt := Packet{ Header: &PacketHeader{ ContinuityCounter: uint8(ctx.cc.inc()), @@ -197,31 +203,19 @@ func (m *Muxer) WriteData(d *MuxerData) (int, error) { if writeAf { pkt.AdaptationField = d.AdaptationField - // one byte for adaptation field length field + // One byte for adaptation field length field. pktLen += 1 + int(calcPacketAdaptationFieldLength(d.AdaptationField)) writeAf = false } bytesAvailable := m.packetSize - pktLen if payloadStart { - pesHeaderLengthCurrent := pesHeaderLength + int(calcPESOptionalHeaderLength(d.PES.Header.OptionalHeader)) - // d.AdaptationField with pes header are too big, we don't have space to write pes header - if bytesAvailable < pesHeaderLengthCurrent { - pkt.Header.HasAdaptationField = true - if pkt.AdaptationField == nil { - pkt.AdaptationField = newStuffingAdaptationField(bytesAvailable) - } else { - pkt.AdaptationField.StuffingLength = bytesAvailable - } - } else { - pkt.Header.HasPayload = true - pkt.Header.PayloadUnitStartIndicator = true - } + processPayloadStart(bytesAvailable, &pkt, d) } else { pkt.Header.HasPayload = true } - if pkt.Header.HasPayload { + if pkt.Header.HasPayload { //nolint:nestif m.buf.Reset() if d.PES.Header.StreamID == 0 { d.PES.Header.StreamID = ctx.es.StreamType.ToPESStreamID() @@ -243,8 +237,10 @@ func (m *Muxer) WriteData(d *MuxerData) (int, error) { pkt.Payload = m.buf.Bytes() bytesAvailable -= ntot - // if we still have some space in packet, we should stuff it with adaptation field stuffing - // we can't stuff packets with 0xff at the end of a packet since it's not uncommon for PES payloads to have length unspecified + // If we still have some space in packet, we should stuff + // it with adaptation field stuffing we can't stuff packets + // with 0xff at the end of a packet since it's not + // uncommon for PES payloads to have length unspecified. if bytesAvailable > 0 { pkt.Header.HasAdaptationField = true if pkt.AdaptationField == nil { @@ -272,8 +268,26 @@ func (m *Muxer) WriteData(d *MuxerData) (int, error) { return bytesWritten, nil } -// Writes given packet to MPEG-TS stream -// Stuffs with 0xffs if packet turns out to be shorter than target packet length +func processPayloadStart(bytesAvailable int, pkt *Packet, d *MuxerData) { + pesHeaderLengthCurrent := pesHeaderLength + + int(calcPESOptionalHeaderLength(d.PES.Header.OptionalHeader)) + // d.AdaptationField with pes header are too big, + // we don't have space to write pes header. + if bytesAvailable < pesHeaderLengthCurrent { + pkt.Header.HasAdaptationField = true + if pkt.AdaptationField == nil { + pkt.AdaptationField = newStuffingAdaptationField(bytesAvailable) + } else { + pkt.AdaptationField.StuffingLength = bytesAvailable + } + } else { + pkt.Header.HasPayload = true + pkt.Header.PayloadUnitStartIndicator = true + } +} + +// WritePacket Writes given packet to MPEG-TS stream +// Stuffs with 0xffs if packet turns out to be shorter than target packet length. func (m *Muxer) WritePacket(p *Packet) (int, error) { return writePacket(m.bitsWriter, p, m.packetSize) } @@ -293,6 +307,7 @@ func (m *Muxer) retransmitTables(force bool) (int, error) { return n, nil } +// WriteTables . func (m *Muxer) WriteTables() (int, error) { bytesWritten := 0 @@ -331,9 +346,9 @@ func (m *Muxer) generatePAT() error { Data: &PSISectionSyntaxData{PAT: d}, Header: &PSISectionSyntaxHeader{ CurrentNextIndicator: true, - // TODO support for PAT tables longer than 1 TS packet - //LastSectionNumber: 0, - //SectionNumber: 0, + // TODO support for PAT tables longer than 1 TS packet. 
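// WriteData above keeps every output packet exactly packetSize (MpegTsPacketSize,
// 188 bytes) long without padding the PES payload itself with 0xff: whatever
// space remains after the TS header, adaptation field and payload is absorbed as
// adaptation-field stuffing, which a demuxer skips via adaptation_field_length.
// The arithmetic is essentially (sketch, not repository code):
func stuffingBytesNeeded(packetSize, headerAndAFLen, payloadLen int) int {
	if n := packetSize - headerAndAFLen - payloadLen; n > 0 {
		return n // Added to AdaptationField.StuffingLength for this packet.
	}
	return 0
}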
+ // LastSectionNumber: 0, + // SectionNumber: 0, TableIDExtension: d.TransportStreamID, VersionNumber: uint8(versionNumber), }, @@ -351,13 +366,13 @@ func (m *Muxer) generatePAT() error { } m.buf.Reset() - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &m.buf}) - if _, err := writePSIData(w, &psiData); err != nil { + w := bitio.NewWriter(&m.buf) + if err := writePSIData(w, &psiData); err != nil { return err } m.patBytes.Reset() - wPacket := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &m.patBytes}) + wPacket := bitio.NewWriter(&m.patBytes) pkt := Packet{ Header: &PacketHeader{ @@ -378,7 +393,7 @@ func (m *Muxer) generatePAT() error { return nil } -func (m *Muxer) generatePMT() error { +func (m *Muxer) generatePMT() error { //nolint:funlen hasPCRPID := false for _, es := range m.pmt.ElementaryStreams { if es.ElementaryPID == m.pmt.PCRPID { @@ -399,9 +414,9 @@ func (m *Muxer) generatePMT() error { Data: &PSISectionSyntaxData{PMT: &m.pmt}, Header: &PSISectionSyntaxHeader{ CurrentNextIndicator: true, - // TODO support for PMT tables longer than 1 TS packet - //LastSectionNumber: 0, - //SectionNumber: 0, + // TODO support for PMT tables longer than 1 TS packet. + // LastSectionNumber: 0, + // SectionNumber: 0, TableIDExtension: m.pmt.ProgramNumber, VersionNumber: uint8(versionNumber), }, @@ -419,19 +434,19 @@ func (m *Muxer) generatePMT() error { } m.buf.Reset() - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &m.buf}) - if _, err := writePSIData(w, &psiData); err != nil { + w := bitio.NewWriter(&m.buf) + if err := writePSIData(w, &psiData); err != nil { return err } m.pmtBytes.Reset() - wPacket := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &m.pmtBytes}) + wPacket := bitio.NewWriter(&m.pmtBytes) pkt := Packet{ Header: &PacketHeader{ HasPayload: true, PayloadUnitStartIndicator: true, - PID: pmtStartPID, // FIXME multiple programs support + PID: pmtStartPID, // FIXME multiple programs support. 
ContinuityCounter: uint8(m.pmtCC.inc()), }, Payload: m.buf.Bytes(), diff --git a/muxer_test.go b/muxer_test.go index caa3e69..00dab74 100644 --- a/muxer_test.go +++ b/muxer_test.go @@ -5,33 +5,33 @@ import ( "context" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) func patExpectedBytes(versionNumber uint8, cc uint8) []byte { buf := bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) - w.Write(uint8(syncByte)) - w.Write("010") // no transport error, payload start, no priority - w.WriteN(PIDPAT, 13) - w.Write("0001") // no scrambling, no AF, payload present - w.WriteN(cc, 4) - - w.Write(uint16(0)) // Table ID - w.Write("1011") // Syntax section indicator, private bit, reserved - w.WriteN(uint16(13), 12) // Section length - - w.Write(uint16(PSITableIDPAT)) - w.Write("11") // Reserved bits - w.WriteN(versionNumber, 5) // Version number - w.Write("1") // Current/next indicator - w.Write(uint8(0)) // Section number - w.Write(uint8(0)) // Last section number - - w.Write(programNumberStart) - w.Write("111") // reserved - w.WriteN(pmtStartPID, 13) + w := bitio.NewWriter(&buf) + w.WriteByte(uint8(syncByte)) + WriteBinary(w, "010") // no transport error, payload start, no priority + w.WriteBits(uint64(PIDPAT), 13) + WriteBinary(w, "0001") // no scrambling, no AF, payload present + w.WriteBits(uint64(cc), 4) + + w.WriteBits(uint64(0), 16) // Table ID + WriteBinary(w, "1011") // Syntax section indicator, private bit, reserved + w.WriteBits(uint64(13), 12) // Section length + + w.WriteBits(uint64(PSITableIDPAT), 16) + WriteBinary(w, "11") // Reserved bits + w.WriteBits(uint64(versionNumber), 5) // Version number + WriteBinary(w, "1") // Current/next indicator + w.WriteByte(0) // Section number + w.WriteByte(0) // Last section number + + w.WriteBits(uint64(programNumberStart), 16) + WriteBinary(w, "111") // reserved + w.WriteBits(uint64(pmtStartPID), 13) // CRC32 if versionNumber == 0 { @@ -69,36 +69,36 @@ func TestMuxer_generatePAT(t *testing.T) { func pmtExpectedBytesVideoOnly(versionNumber, cc uint8) []byte { buf := bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) - w.Write(uint8(syncByte)) - w.Write("010") // no transport error, payload start, no priority - w.WriteN(pmtStartPID, 13) - w.Write("0001") // no scrambling, no AF, payload present - w.WriteN(cc, 4) + w := bitio.NewWriter(&buf) + w.WriteByte(uint8(syncByte)) + WriteBinary(w, "010") // no transport error, payload start, no priority + w.WriteBits(uint64(pmtStartPID), 13) + WriteBinary(w, "0001") // no scrambling, no AF, payload present + w.WriteBits(uint64(cc), 4) - w.Write(uint16(PSITableIDPMT)) // Table ID - w.Write("1011") // Syntax section indicator, private bit, reserved - w.WriteN(uint16(18), 12) // Section length + w.WriteBits(uint64(PSITableIDPMT), 16) // Table ID + WriteBinary(w, "1011") // Syntax section indicator, private bit, reserved + w.WriteBits(18, 12) // Section length - w.Write(programNumberStart) - w.Write("11") // Reserved bits - w.WriteN(versionNumber, 5) // Version number - w.Write("1") // Current/next indicator - w.Write(uint8(0)) // Section number - w.Write(uint8(0)) // Last section number + w.WriteBits(uint64(programNumberStart), 16) + WriteBinary(w, "11") // Reserved bits + w.WriteBits(uint64(versionNumber), 5) // Version number + WriteBinary(w, "1") // Current/next indicator + w.WriteByte(0) // Section number + w.WriteByte(0) // Last section number - w.Write("111") // reserved - 
w.WriteN(uint16(0x1234), 13) // PCR PID + WriteBinary(w, "111") // reserved + w.WriteBits(0x1234, 13) // PCR PID - w.Write("1111") // reserved - w.WriteN(uint16(0), 12) // program info length + WriteBinary(w, "1111") // reserved + w.WriteBits(0, 12) // program info length - w.Write(uint8(StreamTypeH264Video)) - w.Write("111") // reserved - w.WriteN(uint16(0x1234), 13) + w.WriteByte(uint8(StreamTypeH264Video)) + WriteBinary(w, "111") // reserved + w.WriteBits(0x1234, 13) - w.Write("1111") // reserved - w.WriteN(uint16(0), 12) // es info length + WriteBinary(w, "1111") // reserved + w.WriteBits(0, 12) // es info length w.Write([]byte{0x31, 0x48, 0x5b, 0xa2}) // CRC32 @@ -109,41 +109,41 @@ func pmtExpectedBytesVideoOnly(versionNumber, cc uint8) []byte { func pmtExpectedBytesVideoAndAudio(versionNumber uint8, cc uint8) []byte { buf := bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: &buf}) - w.Write(uint8(syncByte)) - w.Write("010") // no transport error, payload start, no priority - w.WriteN(pmtStartPID, 13) - w.Write("0001") // no scrambling, no AF, payload present - w.WriteN(cc, 4) - - w.Write(uint16(PSITableIDPMT)) // Table ID - w.Write("1011") // Syntax section indicator, private bit, reserved - w.WriteN(uint16(23), 12) // Section length - - w.Write(programNumberStart) - w.Write("11") // Reserved bits - w.WriteN(versionNumber, 5) // Version number - w.Write("1") // Current/next indicator - w.Write(uint8(0)) // Section number - w.Write(uint8(0)) // Last section number - - w.Write("111") // reserved - w.WriteN(uint16(0x1234), 13) // PCR PID - - w.Write("1111") // reserved - w.WriteN(uint16(0), 12) // program info length - - w.Write(uint8(StreamTypeH264Video)) - w.Write("111") // reserved - w.WriteN(uint16(0x1234), 13) - w.Write("1111") // reserved - w.WriteN(uint16(0), 12) // es info length - - w.Write(uint8(StreamTypeADTS)) - w.Write("111") // reserved - w.WriteN(uint16(0x0234), 13) - w.Write("1111") // reserved - w.WriteN(uint16(0), 12) // es info length + w := bitio.NewWriter(&buf) + w.WriteByte(uint8(syncByte)) + WriteBinary(w, "010") // no transport error, payload start, no priority + w.WriteBits(uint64(pmtStartPID), 13) + WriteBinary(w, "0001") // no scrambling, no AF, payload present + w.WriteBits(uint64(cc), 4) + + w.WriteBits(uint64(PSITableIDPMT), 16) // Table ID + WriteBinary(w, "1011") // Syntax section indicator, private bit, reserved + w.WriteBits(23, 12) // Section length + + w.WriteBits(uint64(programNumberStart), 16) + WriteBinary(w, "11") // Reserved bits + w.WriteBits(uint64(versionNumber), 5) // Version number + WriteBinary(w, "1") // Current/next indicator + w.WriteByte(0) // Section number + w.WriteByte(0) // Last section number + + WriteBinary(w, "111") // reserved + w.WriteBits(0x1234, 13) // PCR PID + + WriteBinary(w, "1111") // reserved + w.WriteBits(0, 12) // program info length + + w.WriteByte(uint8(StreamTypeH264Video)) + WriteBinary(w, "111") // reserved + w.WriteBits(0x1234, 13) + WriteBinary(w, "1111") // reserved + w.WriteBits(0, 12) // es info length + + w.WriteByte(uint8(StreamTypeADTS)) + WriteBinary(w, "111") // reserved + w.WriteBits(0x0234, 13) + WriteBinary(w, "1111") // reserved + w.WriteBits(0, 12) // es info length // CRC32 if versionNumber == 0 { @@ -248,7 +248,7 @@ func TestMuxer_RemoveElementaryStream(t *testing.T) { assert.NoError(t, err) err = muxer.RemoveElementaryStream(0x1234) - assert.Equal(t, ErrPIDNotFound, err) + assert.ErrorIs(t, err, ErrPIDMissing) } func testPayload() []byte { diff --git a/packet.go 
b/packet.go index c126120..53f8f7f 100644 --- a/packet.go +++ b/packet.go @@ -1,11 +1,13 @@ package astits import ( + "errors" "fmt" - "github.com/asticode/go-astikit" + + "github.com/icza/bitio" ) -// Scrambling Controls +// Scrambling Controls. const ( ScramblingControlNotScrambled = 0 ScramblingControlReservedForFutureUse = 1 @@ -13,323 +15,312 @@ const ( ScramblingControlScrambledWithOddKey = 3 ) +// Constants. const ( MpegTsPacketSize = 188 mpegTsPacketHeaderSize = 3 pcrBytesSize = 6 ) -// Packet represents a packet +// Packet represents a packet. // https://en.wikipedia.org/wiki/MPEG_transport_stream type Packet struct { AdaptationField *PacketAdaptationField Header *PacketHeader - Payload []byte // This is only the payload content + Payload []byte // This is only the payload content. } -// PacketHeader represents a packet header +// PacketHeader represents a packet header. type PacketHeader struct { - ContinuityCounter uint8 // Sequence number of payload packets (0x00 to 0x0F) within each stream (except PID 8191) - HasAdaptationField bool - HasPayload bool - PayloadUnitStartIndicator bool // Set when a PES, PSI, or DVB-MIP packet begins immediately following the header. - PID uint16 // Packet Identifier, describing the payload data. - TransportErrorIndicator bool // Set when a demodulator can't correct errors from FEC data; indicating the packet is corrupt. - TransportPriority bool // Set when the current packet has a higher priority than other packets with the same PID. - TransportScramblingControl uint8 + // TransportErrorIndicator set when a demodulator can't correct + // errors from FEC data; indicating the packet is corrupt. + TransportErrorIndicator bool + + // PayloadUnitStartIndicator set when a PES, PSI, or DVB-MIP + // packet begins immediately following the header. + PayloadUnitStartIndicator bool + + // TransportPriority set when the current packet has a higher + // priority than other packets with the same PID. + TransportPriority bool + + // PID Packet Identifier, describing the payload data. + PID uint16 // 13 bits. + + TransportScramblingControl uint8 // 2 Bits. + + HasAdaptationField bool + HasPayload bool + + // ContinuityCounter Sequence number of payload packets + // (0x00 to 0x0F) within each stream (except PID 8191) + ContinuityCounter uint8 } -// PacketAdaptationField represents a packet adaptation field +// PacketAdaptationField represents a packet adaptation field. type PacketAdaptationField struct { - AdaptationExtensionField *PacketAdaptationExtensionField - DiscontinuityIndicator bool // Set if current TS packet is in a discontinuity state with respect to either the continuity counter or the program clock reference - ElementaryStreamPriorityIndicator bool // Set when this stream should be considered "high priority" + AdaptationExtensionField *PacketAdaptationExtensionField + + // DiscontinuityIndicator set if current TS packet is + // in a discontinuity state with respect to either the + // continuity counter or the program clock reference. + DiscontinuityIndicator bool + + // ElementaryStreamPriorityIndicator set when this + // stream should be considered "high priority". + ElementaryStreamPriorityIndicator bool HasAdaptationExtensionField bool HasOPCR bool HasPCR bool HasTransportPrivateData bool HasSplicingCountdown bool Length int - IsOneByteStuffing bool // Only used for one byte stuffing - if true, adaptation field will be written as one uint8(0). 
Not part of TS format - StuffingLength int // Only used in writePacketAdaptationField to request stuffing - OPCR *ClockReference // Original Program clock reference. Helps when one TS is copied into another - PCR *ClockReference // Program clock reference - RandomAccessIndicator bool // Set when the stream may be decoded without errors from this point - SpliceCountdown int // Indicates how many TS packets from this one a splicing point occurs (Two's complement signed; may be negative) - TransportPrivateDataLength int - TransportPrivateData []byte + + // IsOneByteStuffing only used for one byte + // stuffing - if true, adaptation field will be + // written as one uint8(0). Not part of TS format. + IsOneByteStuffing bool + + // StuffingLength only used in writePacketAdaptationField + // to request stuffing. + StuffingLength int + + // OPCR Original Program clock reference. + // Helps when one TS is copied into another. + OPCR *ClockReference + + // PCR Program clock reference. + PCR *ClockReference + + // RandomAccessIndicator set when the stream may + // be decoded without errors from this point. + RandomAccessIndicator bool + + // SpliceCountdown indicates how many TS packets + // from this one a splicing point occurs + // (Two's complement signed; may be negative). + SpliceCountdown uint8 + TransportPrivateDataLength uint8 + TransportPrivateData []byte } -// PacketAdaptationExtensionField represents a packet adaptation extension field +// PacketAdaptationExtensionField represents a packet adaptation extension field. type PacketAdaptationExtensionField struct { - DTSNextAccessUnit *ClockReference // The PES DTS of the splice point. Split up as 3 bits, 1 marker bit (0x1), 15 bits, 1 marker bit, 15 bits, and 1 marker bit, for 33 data bits total. - HasLegalTimeWindow bool - HasPiecewiseRate bool - HasSeamlessSplice bool + Length uint8 + + HasLegalTimeWindow bool + HasPiecewiseRate bool + HasSeamlessSplice bool + LegalTimeWindowIsValid bool - LegalTimeWindowOffset uint16 // Extra information for rebroadcasters to determine the state of buffers when packets may be missing. - Length int - PiecewiseRate uint32 // The rate of the stream, measured in 188-byte packets, to define the end-time of the LTW. - SpliceType uint8 // Indicates the parameters of the H.262 splice. -} -// parsePacket parses a packet -func parsePacket(i *astikit.BytesIterator) (p *Packet, err error) { - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: getting next byte failed: %w", err) - return - } + // Extra information for rebroadcasters to determine + // the state of buffers when packets may be missing. + LegalTimeWindowOffset uint16 // 15 bits. - // Packet must start with a sync byte + // The rate of the stream, measured in 188-byte + // packets, to define the end-time of the LTW. + PiecewiseRate uint32 // 22 bits. + + // Indicates the parameters of the H.262 splice. + SpliceType uint8 // 4 bits. + + // The PES DTS of the splice point. Split up as 3 bits, + // 1 marker bit (0x1), 15 bits, 1 marker bit, 15 bits, + // and 1 marker bit, for 33 data bits total. + DTSNextAccessUnit *ClockReference +} + +// parsePacket parses a packet. +func parsePacket(r *bitio.CountReader, pktLength int64) (*Packet, error) { + // Packet must start with a sync byte. 
+ b := r.TryReadByte() if b != syncByte { - err = ErrPacketMustStartWithASyncByte - return + return nil, ErrPacketStartSyncByte } - // Create packet - p = &Packet{} + p := &Packet{} - // In case packet size is bigger than 188 bytes, we don't care for the first bytes - i.Seek(i.Len() - MpegTsPacketSize + 1) - offsetStart := i.Offset() + // In case packet size is bigger than 188 bytes, + // we don't care about the first bytes. + var startOffset uint8 + if pktLength > 188*8 { + startOffset = uint8(pktLength/8 - MpegTsPacketSize) - // Parse header - if p.Header, err = parsePacketHeader(i); err != nil { - err = fmt.Errorf("astits: parsing packet header failed: %w", err) - return + skip := make([]byte, startOffset) + TryReadFull(r, skip) + } + + var err error + if p.Header, err = parsePacketHeader(r); err != nil { + return nil, fmt.Errorf("parsing packet header failed: %w", err) } - // Parse adaptation field if p.Header.HasAdaptationField { - if p.AdaptationField, err = parsePacketAdaptationField(i); err != nil { - err = fmt.Errorf("astits: parsing packet adaptation field failed: %w", err) - return + if p.AdaptationField, err = parsePacketAdaptationField(r); err != nil { + return nil, fmt.Errorf("parsing packet adaptation field failed: %w", err) } } - // Build payload if p.Header.HasPayload { - i.Seek(payloadOffset(offsetStart, p.Header, p.AdaptationField)) - p.Payload = i.Dump() - } - return -} + payloadOffset := int64(startOffset+4) * 8 + if p.Header.HasAdaptationField { + payloadOffset += int64(1+p.AdaptationField.Length) * 8 + } -// payloadOffset returns the payload offset -func payloadOffset(offsetStart int, h *PacketHeader, a *PacketAdaptationField) (offset int) { - offset = offsetStart + 3 - if h.HasAdaptationField { - offset += 1 + a.Length + skip := make([]byte, (payloadOffset-r.BitsCount)/8) + TryReadFull(r, skip) + + if r.TryError != nil { + return nil, fmt.Errorf("skipping to payload failed, %v bytes left: %w", (pktLength-r.BitsCount)/8, r.TryError) + } + // Read payload. + p.Payload = make([]byte, (pktLength-r.BitsCount)/8) + TryReadFull(r, p.Payload) } - return + if r.TryError != nil { + return nil, fmt.Errorf("reading payload failed, %v bytes left: %w", (pktLength-r.BitsCount)/8, r.TryError) + } + return p, r.TryError } -// parsePacketHeader parses the packet header -func parsePacketHeader(i *astikit.BytesIterator) (h *PacketHeader, err error) { - // Get next bytes - var bs []byte - if bs, err = i.NextBytesNoCopy(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return +// parsePacketHeader parses the packet header.
+func parsePacketHeader(r *bitio.CountReader) (*PacketHeader, error) { + h := &PacketHeader{ + TransportErrorIndicator: r.TryReadBool(), + PayloadUnitStartIndicator: r.TryReadBool(), + TransportPriority: r.TryReadBool(), + PID: uint16(r.TryReadBits(13)), } - // Create header - h = &PacketHeader{ - ContinuityCounter: uint8(bs[2] & 0xf), - HasAdaptationField: bs[2]&0x20 > 0, - HasPayload: bs[2]&0x10 > 0, - PayloadUnitStartIndicator: bs[0]&0x40 > 0, - PID: uint16(bs[0]&0x1f)<<8 | uint16(bs[1]), - TransportErrorIndicator: bs[0]&0x80 > 0, - TransportPriority: bs[0]&0x20 > 0, - TransportScramblingControl: uint8(bs[2]) >> 6 & 0x3, - } - return + h.TransportScramblingControl = uint8(r.TryReadBits(2)) + h.HasAdaptationField = r.TryReadBool() + h.HasPayload = r.TryReadBool() + h.ContinuityCounter = uint8(r.TryReadBits(4)) + + return h, r.TryError } -// parsePacketAdaptationField parses the packet adaptation field -func parsePacketAdaptationField(i *astikit.BytesIterator) (a *PacketAdaptationField, err error) { - // Create adaptation field - a = &PacketAdaptationField{} +// parsePacketAdaptationField parses the packet adaptation field. +func parsePacketAdaptationField(r *bitio.CountReader) (*PacketAdaptationField, error) { //nolint:funlen + a := &PacketAdaptationField{} - // Get next byte - var b byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + a.Length = int(r.TryReadByte()) - // Length - a.Length = int(b) + afStartOffset := r.BitsCount - afStartOffset := i.Offset() + // Invalid length. + if a.Length <= 0 { + a.StuffingLength = a.Length - int(r.BitsCount-afStartOffset/8)/8 + return a, nil + } - // Valid length - if a.Length > 0 { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } + a.DiscontinuityIndicator = r.TryReadBool() + a.RandomAccessIndicator = r.TryReadBool() + a.ElementaryStreamPriorityIndicator = r.TryReadBool() + a.HasPCR = r.TryReadBool() + a.HasOPCR = r.TryReadBool() + a.HasSplicingCountdown = r.TryReadBool() + a.HasTransportPrivateData = r.TryReadBool() + a.HasAdaptationExtensionField = r.TryReadBool() - // Flags - a.DiscontinuityIndicator = b&0x80 > 0 - a.RandomAccessIndicator = b&0x40 > 0 - a.ElementaryStreamPriorityIndicator = b&0x20 > 0 - a.HasPCR = b&0x10 > 0 - a.HasOPCR = b&0x08 > 0 - a.HasSplicingCountdown = b&0x04 > 0 - a.HasTransportPrivateData = b&0x02 > 0 - a.HasAdaptationExtensionField = b&0x01 > 0 - - // PCR - if a.HasPCR { - if a.PCR, err = parsePCR(i); err != nil { - err = fmt.Errorf("astits: parsing PCR failed: %w", err) - return - } + var err error + if a.HasPCR { + if a.PCR, err = parsePCR(r); err != nil { + return nil, fmt.Errorf("parsing PCR failed: %w", err) } + } - // OPCR - if a.HasOPCR { - if a.OPCR, err = parsePCR(i); err != nil { - err = fmt.Errorf("astits: parsing PCR failed: %w", err) - return - } + if a.HasOPCR { + if a.OPCR, err = parsePCR(r); err != nil { + return nil, fmt.Errorf("parsing OPCR failed: %w", err) } + } - // Splicing countdown - if a.HasSplicingCountdown { - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - a.SpliceCountdown = int(b) - } + if a.HasSplicingCountdown { + a.SpliceCountdown = r.TryReadByte() + } + + if a.HasTransportPrivateData { + a.TransportPrivateDataLength = r.TryReadByte() - // Transport private data - if a.HasTransportPrivateData { - // Length - if b, err = i.NextByte(); err != nil { - err = 
fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - a.TransportPrivateDataLength = int(b) - - // Data - if a.TransportPrivateDataLength > 0 { - if a.TransportPrivateData, err = i.NextBytes(a.TransportPrivateDataLength); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - } + if a.TransportPrivateDataLength > 0 { + a.TransportPrivateData = make([]byte, a.TransportPrivateDataLength) + TryReadFull(r, a.TransportPrivateData) } + } + + if !a.HasAdaptationExtensionField { + a.StuffingLength = a.Length - int(r.BitsCount-afStartOffset)/8 + return a, nil + } + + a.AdaptationExtensionField = &PacketAdaptationExtensionField{} + + a.AdaptationExtensionField.Length = r.TryReadByte() + if a.AdaptationExtensionField.Length <= 0 { + a.StuffingLength = a.Length - int(r.BitsCount-afStartOffset)/8 + return a, nil + } + + a.AdaptationExtensionField.HasLegalTimeWindow = r.TryReadBool() + a.AdaptationExtensionField.HasPiecewiseRate = r.TryReadBool() + a.AdaptationExtensionField.HasSeamlessSplice = r.TryReadBool() + _ = r.TryReadBits(5) // Reserved. - // Adaptation extension - if a.HasAdaptationExtensionField { - // Create extension field - a.AdaptationExtensionField = &PacketAdaptationExtensionField{} - - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Length - a.AdaptationExtensionField.Length = int(b) - if a.AdaptationExtensionField.Length > 0 { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Basic - a.AdaptationExtensionField.HasLegalTimeWindow = b&0x80 > 0 - a.AdaptationExtensionField.HasPiecewiseRate = b&0x40 > 0 - a.AdaptationExtensionField.HasSeamlessSplice = b&0x20 > 0 - - // Legal time window - if a.AdaptationExtensionField.HasLegalTimeWindow { - var bs []byte - if bs, err = i.NextBytesNoCopy(2); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - a.AdaptationExtensionField.LegalTimeWindowIsValid = bs[0]&0x80 > 0 - a.AdaptationExtensionField.LegalTimeWindowOffset = uint16(bs[0]&0x7f)<<8 | uint16(bs[1]) - } - - // Piecewise rate - if a.AdaptationExtensionField.HasPiecewiseRate { - var bs []byte - if bs, err = i.NextBytesNoCopy(3); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - a.AdaptationExtensionField.PiecewiseRate = uint32(bs[0]&0x3f)<<16 | uint32(bs[1])<<8 | uint32(bs[2]) - } - - // Seamless splice - if a.AdaptationExtensionField.HasSeamlessSplice { - // Get next byte - if b, err = i.NextByte(); err != nil { - err = fmt.Errorf("astits: fetching next byte failed: %w", err) - return - } - - // Splice type - a.AdaptationExtensionField.SpliceType = uint8(b&0xf0) >> 4 - - // We need to rewind since the current byte is used by the DTS next access unit as well - i.Skip(-1) - - // DTS Next access unit - if a.AdaptationExtensionField.DTSNextAccessUnit, err = parsePTSOrDTS(i); err != nil { - err = fmt.Errorf("astits: parsing DTS failed: %w", err) - return - } - } - } + if a.AdaptationExtensionField.HasLegalTimeWindow { + a.AdaptationExtensionField.LegalTimeWindowIsValid = r.TryReadBool() + a.AdaptationExtensionField.LegalTimeWindowOffset = uint16(r.TryReadBits(15)) + } + + if a.AdaptationExtensionField.HasPiecewiseRate { + _ = r.TryReadBits(2) // Reserved. 
+ a.AdaptationExtensionField.PiecewiseRate = uint32(r.TryReadBits(22)) + } + + if a.AdaptationExtensionField.HasSeamlessSplice { + a.AdaptationExtensionField.SpliceType = uint8(r.TryReadBits(4)) + + a.AdaptationExtensionField.DTSNextAccessUnit, err = parsePTSOrDTS(r) + if err != nil { + return nil, fmt.Errorf("parsing DTSNextAccessUnit failed: %w", err) } } - a.StuffingLength = a.Length - (i.Offset() - afStartOffset) + a.StuffingLength = a.Length - int(r.BitsCount-afStartOffset)/8 - return + return a, r.TryError } // parsePCR parses a Program Clock Reference -// Program clock reference, stored as 33 bits base, 6 bits reserved, 9 bits extension. -func parsePCR(i *astikit.BytesIterator) (cr *ClockReference, err error) { - var bs []byte - if bs, err = i.NextBytesNoCopy(6); err != nil { - err = fmt.Errorf("astits: fetching next bytes failed: %w", err) - return - } - pcr := uint64(bs[0])<<40 | uint64(bs[1])<<32 | uint64(bs[2])<<24 | uint64(bs[3])<<16 | uint64(bs[4])<<8 | uint64(bs[5]) - cr = newClockReference(int64(pcr>>15), int64(pcr&0x1ff)) - return +// Program clock reference, stored as 33 bits base, +// 6 bits reserved, 9 bits extension. +func parsePCR(r *bitio.CountReader) (*ClockReference, error) { + base := int64(r.TryReadBits(33)) + _ = r.TryReadBits(6) // Reserved. + ext := int64(r.TryReadBits(9)) + + return newClockReference(base, ext), r.TryError } -func writePacket(w *astikit.BitsWriter, p *Packet, targetPacketSize int) (written int, retErr error) { - if retErr = w.Write(uint8(syncByte)); retErr != nil { +// ErrShortPayload . +var ErrShortPayload = errors.New("short payload") + +func writePacket(w *bitio.Writer, p *Packet, targetPacketSize int) (written int, err error) { + if err = w.WriteByte(uint8(syncByte)); err != nil { return } - written += 1 + written++ - n, retErr := writePacketHeader(w, p.Header) - if retErr != nil { + n, err := writePacketHeader(w, p.Header) + if err != nil { return } written += n if p.Header.HasAdaptationField { - n, retErr = writePacketAdaptationField(w, p.AdaptationField) - if retErr != nil { + n, err = writePacketAdaptationField(w, p.AdaptationField) + if err != nil { return } written += n @@ -337,22 +328,23 @@ func writePacket(w *astikit.BitsWriter, p *Packet, targetPacketSize int) (writte if targetPacketSize-written < len(p.Payload) { return 0, fmt.Errorf( - "writePacket: can't write %d bytes of payload: only %d is available", + "%w: payload=%d available=%d", + ErrShortPayload, len(p.Payload), targetPacketSize-written, ) } if p.Header.HasPayload { - retErr = w.Write(p.Payload) - if retErr != nil { + _, err = w.Write(p.Payload) + if err != nil { return } written += len(p.Payload) } for written < targetPacketSize { - if retErr = w.Write(uint8(0xff)); retErr != nil { + if err = w.WriteByte(uint8(0xff)); err != nil { return } written++ @@ -361,28 +353,24 @@ func writePacket(w *astikit.BitsWriter, p *Packet, targetPacketSize int) (writte return written, nil } -func writePacketHeader(w *astikit.BitsWriter, h *PacketHeader) (written int, retErr error) { - b := astikit.NewBitsWriterBatch(w) - - b.Write(h.TransportErrorIndicator) - b.Write(h.PayloadUnitStartIndicator) - b.Write(h.TransportPriority) - b.WriteN(h.PID, 13) - b.WriteN(h.TransportScramblingControl, 2) - b.Write(h.HasAdaptationField) // adaptation_field_control higher bit - b.Write(h.HasPayload) // adaptation_field_control lower bit - b.WriteN(h.ContinuityCounter, 4) - - return mpegTsPacketHeaderSize, b.Err() +func writePacketHeader(w *bitio.Writer, h *PacketHeader) (written int, retErr error) { + 
w.TryWriteBool(h.TransportErrorIndicator) + w.TryWriteBool(h.PayloadUnitStartIndicator) + w.TryWriteBool(h.TransportPriority) + w.TryWriteBits(uint64(h.PID), 13) + w.TryWriteBits(uint64(h.TransportScramblingControl), 2) + w.TryWriteBool(h.HasAdaptationField) // adaptation_field_control higher bit. + w.TryWriteBool(h.HasPayload) // adaptation_field_control lower bit. + w.TryWriteBits(uint64(h.ContinuityCounter), 4) + + return mpegTsPacketHeaderSize, w.TryError } -func writePCR(w *astikit.BitsWriter, cr *ClockReference) (int, error) { - b := astikit.NewBitsWriterBatch(w) - - b.WriteN(uint64(cr.Base), 33) - b.WriteN(uint8(0xff), 6) - b.WriteN(uint64(cr.Extension), 9) - return pcrBytesSize, b.Err() +func writePCR(w *bitio.Writer, cr *ClockReference) (int, error) { + w.TryWriteBits(uint64(cr.Base), 33) + w.TryWriteBits(0xff, 6) + w.TryWriteBits(uint64(cr.Extension), 9) + return pcrBytesSize, w.TryError } func calcPacketAdaptationFieldLength(af *PacketAdaptationField) (length uint8) { @@ -406,26 +394,26 @@ func calcPacketAdaptationFieldLength(af *PacketAdaptationField) (length uint8) { return } -func writePacketAdaptationField(w *astikit.BitsWriter, af *PacketAdaptationField) (bytesWritten int, retErr error) { - b := astikit.NewBitsWriterBatch(w) +func writePacketAdaptationField(w *bitio.Writer, af *PacketAdaptationField) (int, error) { //nolint:funlen + var bytesWritten int if af.IsOneByteStuffing { - b.Write(uint8(0)) + w.TryWriteByte(0) return 1, nil } length := calcPacketAdaptationFieldLength(af) - b.Write(length) + w.TryWriteByte(length) bytesWritten++ - b.Write(af.DiscontinuityIndicator) - b.Write(af.RandomAccessIndicator) - b.Write(af.ElementaryStreamPriorityIndicator) - b.Write(af.HasPCR) - b.Write(af.HasOPCR) - b.Write(af.HasSplicingCountdown) - b.Write(af.HasTransportPrivateData) - b.Write(af.HasAdaptationExtensionField) + w.TryWriteBool(af.DiscontinuityIndicator) + w.TryWriteBool(af.RandomAccessIndicator) + w.TryWriteBool(af.ElementaryStreamPriorityIndicator) + w.TryWriteBool(af.HasPCR) + w.TryWriteBool(af.HasOPCR) + w.TryWriteBool(af.HasSplicingCountdown) + w.TryWriteBool(af.HasTransportPrivateData) + w.TryWriteBool(af.HasAdaptationExtensionField) bytesWritten++ @@ -446,16 +434,16 @@ func writePacketAdaptationField(w *astikit.BitsWriter, af *PacketAdaptationField } if af.HasSplicingCountdown { - b.Write(uint8(af.SpliceCountdown)) + w.TryWriteByte(af.SpliceCountdown) bytesWritten++ } if af.HasTransportPrivateData { // we can get length from TransportPrivateData itself, why do we need separate field? 
- b.Write(uint8(af.TransportPrivateDataLength)) + w.TryWriteByte(af.TransportPrivateDataLength) bytesWritten++ if af.TransportPrivateDataLength > 0 { - b.Write(af.TransportPrivateData) + w.TryWrite(af.TransportPrivateData) } bytesWritten += len(af.TransportPrivateData) } @@ -470,15 +458,15 @@ func writePacketAdaptationField(w *astikit.BitsWriter, af *PacketAdaptationField // stuffing for i := 0; i < af.StuffingLength; i++ { - b.Write(uint8(0xff)) + w.TryWriteByte(0xff) bytesWritten++ } - - retErr = b.Err() - return + return bytesWritten, w.TryError } -func calcPacketAdaptationFieldExtensionLength(afe *PacketAdaptationExtensionField) (length uint8) { +func calcPacketAdaptationFieldExtensionLength( + afe *PacketAdaptationExtensionField, +) (length uint8) { length++ if afe.HasLegalTimeWindow { length += 2 @@ -492,28 +480,30 @@ func calcPacketAdaptationFieldExtensionLength(afe *PacketAdaptationExtensionFiel return length } -func writePacketAdaptationFieldExtension(w *astikit.BitsWriter, afe *PacketAdaptationExtensionField) (bytesWritten int, retErr error) { - b := astikit.NewBitsWriterBatch(w) +func writePacketAdaptationFieldExtension( + w *bitio.Writer, afe *PacketAdaptationExtensionField, +) (int, error) { + var bytesWritten int length := calcPacketAdaptationFieldExtensionLength(afe) - b.Write(length) + w.TryWriteByte(length) bytesWritten++ - b.Write(afe.HasLegalTimeWindow) - b.Write(afe.HasPiecewiseRate) - b.Write(afe.HasSeamlessSplice) - b.WriteN(uint8(0xff), 5) // reserved + w.TryWriteBool(afe.HasLegalTimeWindow) + w.TryWriteBool(afe.HasPiecewiseRate) + w.TryWriteBool(afe.HasSeamlessSplice) + w.TryWriteBits(0xff, 5) // reserved bytesWritten++ if afe.HasLegalTimeWindow { - b.Write(afe.LegalTimeWindowIsValid) - b.WriteN(afe.LegalTimeWindowOffset, 15) + w.TryWriteBool(afe.LegalTimeWindowIsValid) + w.TryWriteBits(uint64(afe.LegalTimeWindowOffset), 15) bytesWritten += 2 } if afe.HasPiecewiseRate { - b.WriteN(uint8(0xff), 2) - b.WriteN(afe.PiecewiseRate, 22) + w.TryWriteBits(0xff, 2) + w.TryWriteBits(uint64(afe.PiecewiseRate), 22) bytesWritten += 3 } @@ -525,8 +515,7 @@ func writePacketAdaptationFieldExtension(w *astikit.BitsWriter, afe *PacketAdapt bytesWritten += n } - retErr = b.Err() - return + return bytesWritten, w.TryError } func newStuffingAdaptationField(bytesToStuff int) *PacketAdaptationField { diff --git a/packet_buffer.go b/packet_buffer.go index db3cf3a..92ce8c9 100644 --- a/packet_buffer.go +++ b/packet_buffer.go @@ -2,20 +2,22 @@ package astits import ( "bufio" + "bytes" + "errors" "fmt" "io" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" ) -// packetBuffer represents a packet buffer +// packetBuffer represents a packet buffer. type packetBuffer struct { packetSize int r io.Reader packetReadBuffer []byte } -// newPacketBuffer creates a new packet buffer +// newPacketBuffer creates a new packet buffer. func newPacketBuffer(r io.Reader, packetSize int) (pb *packetBuffer, err error) { // Init pb = &packetBuffer{ @@ -23,68 +25,71 @@ func newPacketBuffer(r io.Reader, packetSize int) (pb *packetBuffer, err error) r: r, } - // Packet size is not set + // Packet size is not set. if pb.packetSize == 0 { - // Auto detect packet size + // Auto detect packet size. if pb.packetSize, err = autoDetectPacketSize(r); err != nil { - err = fmt.Errorf("astits: auto detecting packet size failed: %w", err) + err = fmt.Errorf("auto detecting packet size failed: %w", err) return } } return } +// ErrSingleSyncByte . 
+var ErrSingleSyncByte = errors.New("only one sync byte detected") + // autoDetectPacketSize updates the packet size based on the first bytes // Minimum packet size is 188 and is bounded by 2 sync bytes -// Assumption is made that the first byte of the reader is a sync byte -func autoDetectPacketSize(r io.Reader) (packetSize int, err error) { +// Assumption is made that the first byte of the reader is a sync byte. +func autoDetectPacketSize(r io.Reader) (int, error) { // Read first bytes const l = 193 - var b = make([]byte, l) - shouldRewind, rerr := peek(r, b) - if rerr != nil { - err = fmt.Errorf("astits: reading first %d bytes failed: %w", l, rerr) - return + b := make([]byte, l) + shouldRewind, err := peek(r, b) + if err != nil { + return 0, fmt.Errorf("reading first %d bytes failed: %w", l, err) } - // Packet must start with a sync byte + // Packet must start with a sync byte. if b[0] != syncByte { - err = ErrPacketMustStartWithASyncByte - return + return 0, ErrPacketStartSyncByte } - // Look for sync bytes + var packetSize int + // Look for sync bytes. for idx, b := range b { - if b == syncByte && idx >= MpegTsPacketSize { - // Update packet size - packetSize = idx + if b != syncByte || idx < MpegTsPacketSize { + continue + } - if !shouldRewind { - return - } + // Update packet size. + packetSize = idx + + if !shouldRewind { + return packetSize, nil + } - // Rewind or sync reader - var n int64 - if n, err = rewind(r); err != nil { - err = fmt.Errorf("astits: rewinding failed: %w", err) - return - } else if n == -1 { - var ls = packetSize - (l - packetSize) - if _, err = r.Read(make([]byte, ls)); err != nil { - err = fmt.Errorf("astits: reading %d bytes to sync reader failed: %w", ls, err) - return - } + // Rewind or sync reader. + var n int64 + if n, err = rewind(r); err != nil { + return 0, fmt.Errorf("rewinding failed: %w", err) + } else if n == -1 { + ls := packetSize - (l - packetSize) + _, err := r.Read(make([]byte, ls)) + if err != nil { + return 0, fmt.Errorf("reading %d bytes to sync reader failed: %w", ls, err) } - return } + return packetSize, nil } - err = fmt.Errorf("astits: only one sync byte detected in first %d bytes", l) - return + return 0, fmt.Errorf("%w in first %d bytes", ErrSingleSyncByte, l) } -// bufio.Reader can't be rewinded, which leads to packet loss on packet size autodetection -// but it has handy Peek() method -// so what we do here is peeking bytes for bufio.Reader and falling back to rewinding/syncing for all other readers +// peek peeks bytes for a bufio.Reader and falls back to +// rewinding/syncing for all other readers. bufio.Reader can't be +// rewound, which leads to packet loss during packet size +// autodetection, but it has a handy Peek() method. func peek(r io.Reader, b []byte) (shouldRewind bool, err error) { if br, ok := r.(*bufio.Reader); ok { var bs []byte @@ -101,11 +106,11 @@ func peek(r io.Reader, b []byte) (shouldRewind bool, err error) { return } -// rewind rewinds the reader if possible, otherwise n = -1 +// rewind rewinds the reader if possible; otherwise n = -1.
func rewind(r io.Reader) (n int64, err error) { if s, ok := r.(io.Seeker); ok { if n, err = s.Seek(0, 0); err != nil { - err = fmt.Errorf("astits: seeking to 0 failed: %w", err) + err = fmt.Errorf("seeking to 0 failed: %w", err) return } return @@ -114,26 +119,29 @@ func rewind(r io.Reader) (n int64, err error) { return } -// next fetches the next packet from the buffer -func (pb *packetBuffer) next() (p *Packet, err error) { +// next fetches the next packet from the buffer. +func (pb *packetBuffer) next() (*Packet, error) { // Read if pb.packetReadBuffer == nil || len(pb.packetReadBuffer) != pb.packetSize { pb.packetReadBuffer = make([]byte, pb.packetSize) } - if _, err = io.ReadFull(pb.r, pb.packetReadBuffer); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - err = ErrNoMorePackets - } else { - err = fmt.Errorf("astits: reading %d bytes failed: %w", pb.packetSize, err) + _, err := io.ReadFull(pb.r, pb.packetReadBuffer) + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return nil, io.EOF } - return + return nil, fmt.Errorf("reading %d bytes failed: %w", pb.packetSize, err) } - // Parse packet - if p, err = parsePacket(astikit.NewBytesIterator(pb.packetReadBuffer)); err != nil { - err = fmt.Errorf("astits: building packet failed: %w", err) - return + r := bitio.NewCountReader(bytes.NewReader(pb.packetReadBuffer)) + pktBufferLength := int64(len(pb.packetReadBuffer) * 8) + + // Parse packet. + p, err := parsePacket(r, pktBufferLength) + if err != nil { + return nil, fmt.Errorf("building packet failed: %w", err) } - return + + return p, nil } diff --git a/packet_buffer_test.go b/packet_buffer_test.go index a4b9e35..980a4f7 100644 --- a/packet_buffer_test.go +++ b/packet_buffer_test.go @@ -4,26 +4,26 @@ import ( "bytes" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) func TestAutoDetectPacketSize(t *testing.T) { // Packet should start with a sync byte buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(2)) - w.Write(byte(syncByte)) + w := bitio.NewWriter(buf) + w.WriteByte(uint8(2)) + w.WriteByte(byte(syncByte)) _, err := autoDetectPacketSize(bytes.NewReader(buf.Bytes())) - assert.EqualError(t, err, ErrPacketMustStartWithASyncByte.Error()) + assert.ErrorIs(t, err, ErrPacketStartSyncByte) // Valid packet size buf.Reset() - w.Write(byte(syncByte)) + w.WriteByte(byte(syncByte)) w.Write(make([]byte, 20)) - w.Write(byte(syncByte)) + w.WriteByte(byte(syncByte)) w.Write(make([]byte, 166)) - w.Write(byte(syncByte)) + w.WriteByte(byte(syncByte)) w.Write(make([]byte, 187)) w.Write([]byte("test")) r := bytes.NewReader(buf.Bytes()) diff --git a/packet_pool.go b/packet_pool.go index 27de8b5..6cbe290 100644 --- a/packet_pool.go +++ b/packet_pool.go @@ -5,7 +5,7 @@ import ( "sync" ) -// packetAccumulator keeps track of packets for a single PID and decides when to flush them +// packetAccumulator keeps track of packets for a single PID and decides when to flush them. type packetAccumulator struct { parser PacketsParser pid uint16 @@ -13,7 +13,7 @@ type packetAccumulator struct { q []*Packet } -// newPacketAccumulator creates a new packet queue for a single PID +// newPacketAccumulator creates a new packet queue for a single PID. 
func newPacketAccumulator(pid uint16, parser PacketsParser, programMap *programMap) *packetAccumulator { return &packetAccumulator{ parser: parser, @@ -22,21 +22,23 @@ func newPacketAccumulator(pid uint16, parser PacketsParser, programMap *programM } } -// add adds a new packet for this PID to the queue -func (b *packetAccumulator) add(p *Packet) (ps []*Packet) { +// add adds a new packet for this PID to the queue. +func (b *packetAccumulator) add(p *Packet) []*Packet { mps := b.q - // Empty buffer if we detect a discontinuity + // Empty buffer if we detect a discontinuity. if hasDiscontinuity(mps, p) { mps = []*Packet{} } - // Throw away packet if it's the same as the previous one + // Throw away packet if it's the same as the previous one. if isSameAsPrevious(mps, p) { - return + return nil } - // Flush buffer if new payload starts here + var ps []*Packet + + // Flush buffer if new payload starts here. if p.Header.PayloadUnitStartIndicator { ps = mps mps = []*Packet{p} @@ -44,10 +46,10 @@ func (b *packetAccumulator) add(p *Packet) (ps []*Packet) { mps = append(mps, p) } - // Check if PSI payload is complete + // Check if PSI payload is complete. if b.programMap != nil && (b.pid == PIDPAT || b.programMap.exists(b.pid)) { - // TODO Use partial data parsing instead + // TODO Use partial data parsing instead. if _, err := parseData(mps, b.parser, b.programMap); err == nil { ps = mps mps = nil @@ -55,10 +57,10 @@ func (b *packetAccumulator) add(p *Packet) (ps []*Packet) { } b.q = mps - return + return ps } -// packetPool represents a queue of packets for each PID in the stream +// packetPool represents a queue of packets for each PID in the stream. type packetPool struct { b map[uint16]*packetAccumulator // Indexed by PID m *sync.Mutex @@ -67,7 +69,7 @@ type packetPool struct { programMap *programMap } -// newPacketPool creates a new packet pool with an optional parser and programMap +// newPacketPool creates a new packet pool with an optional parser and programMap. func newPacketPool(parser PacketsParser, programMap *programMap) *packetPool { return &packetPool{ b: make(map[uint16]*packetAccumulator), @@ -78,33 +80,32 @@ func newPacketPool(parser PacketsParser, programMap *programMap) *packetPool { } } -// add adds a new packet to the pool +// add adds a new packet to the pool. func (b *packetPool) add(p *Packet) (ps []*Packet) { - // Throw away packet if error indicator + // Throw away packet if error indicator. if p.Header.TransportErrorIndicator { return } - // Throw away packets that don't have a payload until we figure out what we're going to do with them + // Throw away packets that don't have a payload until + // we figure out what we're going to do with them // TODO figure out what we're going to do with them :D if !p.Header.HasPayload { return } - // Lock + // Make sure accumulator exists. b.m.Lock() - defer b.m.Unlock() - - // Make sure accumulator exists if _, ok := b.b[p.Header.PID]; !ok { b.b[p.Header.PID] = newPacketAccumulator(p.Header.PID, b.parser, b.programMap) } + b.m.Unlock() - // Add to the accumulator + // Add to the accumulator. return b.b[p.Header.PID].add(p) } -// dump dumps the packet pool by looking for the first item with packets inside +// dump dumps the packet pool by looking for the first item with packets inside. 
func (b *packetPool) dump() (ps []*Packet) { b.m.Lock() defer b.m.Unlock() @@ -123,14 +124,17 @@ func (b *packetPool) dump() (ps []*Packet) { return } -// hasDiscontinuity checks whether a packet is discontinuous with a set of packets +// hasDiscontinuity checks whether a packet is discontinuous with a set of packets. func hasDiscontinuity(ps []*Packet, p *Packet) bool { return (p.Header.HasAdaptationField && p.AdaptationField.DiscontinuityIndicator) || (len(ps) > 0 && p.Header.HasPayload && p.Header.ContinuityCounter != (ps[len(ps)-1].Header.ContinuityCounter+1)%16) || (len(ps) > 0 && !p.Header.HasPayload && p.Header.ContinuityCounter != ps[len(ps)-1].Header.ContinuityCounter) } -// isSameAsPrevious checks whether a packet is the same as the last packet of a set of packets +// isSameAsPrevious checks whether a packet is the +// same as the last packet of a set of packets. func isSameAsPrevious(ps []*Packet, p *Packet) bool { - return len(ps) > 0 && p.Header.HasPayload && p.Header.ContinuityCounter == ps[len(ps)-1].Header.ContinuityCounter + return len(ps) > 0 && + p.Header.HasPayload && + p.Header.ContinuityCounter == ps[len(ps)-1].Header.ContinuityCounter } diff --git a/packet_pool_test.go b/packet_pool_test.go index 0af9184..d5a8863 100644 --- a/packet_pool_test.go +++ b/packet_pool_test.go @@ -7,11 +7,33 @@ import ( ) func TestHasDiscontinuity(t *testing.T) { - assert.False(t, hasDiscontinuity([]*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, &Packet{Header: &PacketHeader{ContinuityCounter: 0, HasPayload: true}})) - assert.False(t, hasDiscontinuity([]*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, &Packet{Header: &PacketHeader{ContinuityCounter: 15}})) - assert.True(t, hasDiscontinuity([]*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, &Packet{AdaptationField: &PacketAdaptationField{DiscontinuityIndicator: true}, Header: &PacketHeader{ContinuityCounter: 0, HasAdaptationField: true, HasPayload: true}})) - assert.True(t, hasDiscontinuity([]*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, &Packet{Header: &PacketHeader{ContinuityCounter: 1, HasPayload: true}})) - assert.True(t, hasDiscontinuity([]*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, &Packet{Header: &PacketHeader{ContinuityCounter: 0}})) + assert.False( + t, hasDiscontinuity( + []*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, + &Packet{Header: &PacketHeader{ContinuityCounter: 0, HasPayload: true}})) + + assert.False( + t, hasDiscontinuity( + []*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, + &Packet{Header: &PacketHeader{ContinuityCounter: 15}})) + + assert.True( + t, hasDiscontinuity( + []*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, + &Packet{ + AdaptationField: &PacketAdaptationField{DiscontinuityIndicator: true}, + Header: &PacketHeader{ContinuityCounter: 0, HasAdaptationField: true, HasPayload: true}, + })) + + assert.True( + t, hasDiscontinuity( + []*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, + &Packet{Header: &PacketHeader{ContinuityCounter: 1, HasPayload: true}})) + + assert.True( + t, hasDiscontinuity( + []*Packet{{Header: &PacketHeader{ContinuityCounter: 15}}}, + &Packet{Header: &PacketHeader{ContinuityCounter: 0}})) } func TestIsSameAsPrevious(t *testing.T) { diff --git a/packet_test.go b/packet_test.go index ad0a587..38874f1 100644 --- a/packet_test.go +++ b/packet_test.go @@ -5,20 +5,20 @@ import ( "fmt" "testing" - "github.com/asticode/go-astikit" + "github.com/icza/bitio" "github.com/stretchr/testify/assert" ) func packet(h 
PacketHeader, a PacketAdaptationField, i []byte, packet192bytes bool) ([]byte, *Packet) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(syncByte)) // Sync byte + w := bitio.NewWriter(buf) + w.WriteByte(uint8(syncByte)) // Sync byte if packet192bytes { w.Write([]byte("test")) // Sometimes packets are 192 bytes } - w.Write(packetHeaderBytes(h, "11")) // Header - w.Write(packetAdaptationFieldBytes(a)) // Adaptation field - var payload = append(i, bytes.Repeat([]byte{0}, 147-len(i))...) // Payload + w.Write(packetHeaderBytes(h, "11")) // Header + w.Write(packetAdaptationFieldBytes(a)) // Adaptation field + payload := append(i, bytes.Repeat([]byte{0}, 147-len(i))...) // Payload w.Write(payload) return buf.Bytes(), &Packet{ AdaptationField: packetAdaptationField, @@ -29,8 +29,8 @@ func packet(h PacketHeader, a PacketAdaptationField, i []byte, packet192bytes bo func packetShort(h PacketHeader, payload []byte) ([]byte, *Packet) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(syncByte)) // Sync byte + w := bitio.NewWriter(buf) + w.WriteByte(uint8(syncByte)) // Sync byte w.Write(packetHeaderBytes(h, "01")) // Header p := append(payload, bytes.Repeat([]byte{0}, MpegTsPacketSize-buf.Len())...) w.Write(p) @@ -43,27 +43,24 @@ func packetShort(h PacketHeader, payload []byte) ([]byte, *Packet) { func TestParsePacket(t *testing.T) { // Packet not starting with a sync buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint16(1)) // Invalid sync byte - _, err := parsePacket(astikit.NewBytesIterator(buf.Bytes())) - assert.EqualError(t, err, ErrPacketMustStartWithASyncByte.Error()) + w := bitio.NewWriter(buf) + w.WriteBits(1, 16) // Invalid sync byte + r := bitio.NewCountReader(bytes.NewReader(buf.Bytes())) + _, err := parsePacket(r, int64(len(buf.Bytes())*8)) + assert.ErrorIs(t, err, ErrPacketStartSyncByte) // Valid b, ep := packet(*packetHeader, *packetAdaptationField, []byte("payload"), true) - p, err := parsePacket(astikit.NewBytesIterator(b)) + r = bitio.NewCountReader(bytes.NewReader(b)) + p, err := parsePacket(r, int64(len(b)*8)) assert.NoError(t, err) assert.Equal(t, p, ep) } -func TestPayloadOffset(t *testing.T) { - assert.Equal(t, 3, payloadOffset(0, &PacketHeader{}, nil)) - assert.Equal(t, 7, payloadOffset(1, &PacketHeader{HasAdaptationField: true}, &PacketAdaptationField{Length: 2})) -} - func TestWritePacket(t *testing.T) { eb, ep := packet(*packetHeader, *packetAdaptationField, []byte("payload"), false) buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) n, err := writePacket(w, ep, MpegTsPacketSize) assert.NoError(t, err) assert.Equal(t, MpegTsPacketSize, n) @@ -79,7 +76,7 @@ func TestWritePacket_HeaderOnly(t *testing.T) { _, ep := packetShort(shortPacketHeader, nil) buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) n, err := writePacket(w, ep, MpegTsPacketSize) assert.NoError(t, err) @@ -88,8 +85,8 @@ func TestWritePacket_HeaderOnly(t *testing.T) { // we can't just compare bytes returned by packetShort since they're not completely correct, // so we just cross-check writePacket with parsePacket - i := astikit.NewBytesIterator(buf.Bytes()) - p, err := parsePacket(i) + r := bitio.NewCountReader(bytes.NewReader(buf.Bytes())) + p, err := parsePacket(r, int64(len(buf.Bytes())*8)) 
assert.NoError(t, err) assert.Equal(t, ep, p) } @@ -107,26 +104,28 @@ var packetHeader = &PacketHeader{ func packetHeaderBytes(h PacketHeader, afControl string) []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(h.TransportErrorIndicator) // Transport error indicator - w.Write(h.PayloadUnitStartIndicator) // Payload unit start indicator - w.Write("1") // Transport priority - w.Write(fmt.Sprintf("%.13b", h.PID)) // PID - w.Write("10") // Scrambling control - w.Write(afControl) // Adaptation field control - w.Write(fmt.Sprintf("%.4b", h.ContinuityCounter)) // Continuity counter + w := bitio.NewWriter(buf) + w.WriteBool(h.TransportErrorIndicator) // Transport error indicator + w.WriteBool(h.PayloadUnitStartIndicator) // Payload unit start indicator + WriteBinary(w, "1") // Transport priority + WriteBinary(w, fmt.Sprintf("%.13b", h.PID)) // PID + WriteBinary(w, "10") // Scrambling control + WriteBinary(w, afControl) // Adaptation field control + WriteBinary(w, fmt.Sprintf("%.4b", h.ContinuityCounter)) // Continuity counter return buf.Bytes() } func TestParsePacketHeader(t *testing.T) { - v, err := parsePacketHeader(astikit.NewBytesIterator(packetHeaderBytes(*packetHeader, "11"))) + bs := packetHeaderBytes(*packetHeader, "11") + r := bitio.NewCountReader(bytes.NewReader(bs)) + v, err := parsePacketHeader(r) assert.Equal(t, packetHeader, v) assert.NoError(t, err) } func TestWritePacketHeader(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) bytesWritten, err := writePacketHeader(w, packetHeader) assert.NoError(t, err) assert.Equal(t, bytesWritten, 3) @@ -165,44 +164,46 @@ var packetAdaptationField = &PacketAdaptationField{ func packetAdaptationFieldBytes(a PacketAdaptationField) []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write(uint8(36)) // Length - w.Write(a.DiscontinuityIndicator) // Discontinuity indicator - w.Write("1") // Random access indicator - w.Write("1") // Elementary stream priority indicator - w.Write("1") // PCR flag - w.Write("1") // OPCR flag - w.Write("1") // Splicing point flag - w.Write("1") // Transport data flag - w.Write("1") // Adaptation field extension flag - w.Write(pcrBytes()) // PCR - w.Write(pcrBytes()) // OPCR - w.Write(uint8(2)) // Splice countdown - w.Write(uint8(4)) // Transport private data length - w.Write([]byte("test")) // Transport private data - w.Write(uint8(11)) // Adaptation extension length - w.Write("1") // LTW flag - w.Write("1") // Piecewise rate flag - w.Write("1") // Seamless splice flag - w.Write("11111") // Reserved - w.Write("1") // LTW valid flag - w.Write("010101010101010") // LTW offset - w.Write("11") // Piecewise rate reserved - w.Write("1010101010101010101010") // Piecewise rate - w.Write(dtsBytes("0010")) // Splice type + DTS next access unit - w.WriteN(^uint64(0), 40) // Stuffing bytes + w := bitio.NewWriter(buf) + w.WriteByte(uint8(36)) // Length + w.WriteBool(a.DiscontinuityIndicator) // Discontinuity indicator + WriteBinary(w, "1") // Random access indicator + WriteBinary(w, "1") // Elementary stream priority indicator + WriteBinary(w, "1") // PCR flag + WriteBinary(w, "1") // OPCR flag + WriteBinary(w, "1") // Splicing point flag + WriteBinary(w, "1") // Transport data flag + WriteBinary(w, "1") // Adaptation field extension flag + w.Write(pcrBytes()) // PCR + w.Write(pcrBytes()) // OPCR + w.WriteByte(uint8(2)) // Splice 
countdown + w.WriteByte(uint8(4)) // Transport private data length + w.Write([]byte("test")) // Transport private data + w.WriteByte(uint8(11)) // Adaptation extension length + WriteBinary(w, "1") // LTW flag + WriteBinary(w, "1") // Piecewise rate flag + WriteBinary(w, "1") // Seamless splice flag + WriteBinary(w, "11111") // Reserved + WriteBinary(w, "1") // LTW valid flag + WriteBinary(w, "010101010101010") // LTW offset + WriteBinary(w, "11") // Piecewise rate reserved + WriteBinary(w, "1010101010101010101010") // Piecewise rate + w.Write(dtsBytes("0010")) // Splice type + DTS next access unit + w.WriteBits(^uint64(0), 40) // Stuffing bytes return buf.Bytes() } func TestParsePacketAdaptationField(t *testing.T) { - v, err := parsePacketAdaptationField(astikit.NewBytesIterator(packetAdaptationFieldBytes(*packetAdaptationField))) + bs := packetAdaptationFieldBytes(*packetAdaptationField) + r := bitio.NewCountReader(bytes.NewReader(bs)) + v, err := parsePacketAdaptationField(r) assert.Equal(t, packetAdaptationField, v) assert.NoError(t, err) } func TestWritePacketAdaptationField(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) eb := packetAdaptationFieldBytes(*packetAdaptationField) bytesWritten, err := writePacketAdaptationField(w, packetAdaptationField) assert.NoError(t, err) @@ -218,22 +219,23 @@ var pcr = &ClockReference{ func pcrBytes() []byte { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) - w.Write("101010101010101010101010101010101") // Base - w.Write("111111") // Reserved - w.Write("101010101") // Extension + w := bitio.NewWriter(buf) + WriteBinary(w, "101010101010101010101010101010101") // Base + WriteBinary(w, "111111") // Reserved + WriteBinary(w, "101010101") // Extension return buf.Bytes() } func TestParsePCR(t *testing.T) { - v, err := parsePCR(astikit.NewBytesIterator(pcrBytes())) + r := bitio.NewCountReader(bytes.NewReader(pcrBytes())) + v, err := parsePCR(r) assert.Equal(t, pcr, v) assert.NoError(t, err) } func TestWritePCR(t *testing.T) { buf := &bytes.Buffer{} - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) bytesWritten, err := writePCR(w, pcr) assert.NoError(t, err) assert.Equal(t, bytesWritten, 6) @@ -244,7 +246,7 @@ func TestWritePCR(t *testing.T) { func BenchmarkWritePCR(b *testing.B) { buf := &bytes.Buffer{} buf.Grow(6) - w := astikit.NewBitsWriter(astikit.BitsWriterOptions{Writer: buf}) + w := bitio.NewWriter(buf) b.ReportAllocs() for i := 0; i < b.N; i++ { @@ -258,6 +260,7 @@ func BenchmarkParsePacket(b *testing.B) { for i := 0; i < b.N; i++ { b.ReportAllocs() - parsePacket(astikit.NewBytesIterator(bs)) + r := bitio.NewCountReader(bytes.NewReader(bs)) + parsePacket(r, int64(len(bs)*8)) } } diff --git a/program_map.go b/program_map.go index d92b94c..150a35e 100644 --- a/program_map.go +++ b/program_map.go @@ -2,13 +2,13 @@ package astits import "sync" -// programMap represents a program ids map +// programMap represents a program ids map. type programMap struct { m *sync.Mutex - p map[uint16]uint16 // map[ProgramMapID]ProgramNumber + p map[uint16]uint16 // map[ProgramMapID]ProgramNumber. } -// newProgramMap creates a new program ids map +// newProgramMap creates a new program ids map. 
func newProgramMap() *programMap { return &programMap{ m: &sync.Mutex{}, @@ -16,7 +16,7 @@ func newProgramMap() *programMap { } } -// exists checks whether the program with this pid exists +// exists checks whether the program with this pid exists. func (m programMap) exists(pid uint16) (ok bool) { m.m.Lock() defer m.m.Unlock() @@ -24,19 +24,13 @@ func (m programMap) exists(pid uint16) (ok bool) { return } -// set sets a new program id +// set sets a new program id. func (m programMap) set(pid, number uint16) { m.m.Lock() defer m.m.Unlock() m.p[pid] = number } -func (m programMap) unset(pid uint16) { - m.m.Lock() - defer m.m.Unlock() - delete(m.p, pid) -} - func (m programMap) toPATData() *PATData { m.m.Lock() defer m.m.Unlock() diff --git a/program_map_test.go b/program_map_test.go index de9b4a0..cc22f76 100644 --- a/program_map_test.go +++ b/program_map_test.go @@ -11,6 +11,4 @@ func TestProgramMap(t *testing.T) { assert.False(t, pm.exists(1)) pm.set(1, 1) assert.True(t, pm.exists(1)) - pm.unset(1) - assert.False(t, pm.exists(1)) }