Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2026-01-06 04:09:40 -06:00)
build(deps): bump github.com/kovidgoyal/imaging from 1.7.2 to 1.8.17
Bumps [github.com/kovidgoyal/imaging](https://github.com/kovidgoyal/imaging) from 1.7.2 to 1.8.17.
- [Release notes](https://github.com/kovidgoyal/imaging/releases)
- [Changelog](https://github.com/kovidgoyal/imaging/blob/master/.goreleaser.yaml)
- [Commits](https://github.com/kovidgoyal/imaging/compare/v1.7.2...v1.8.17)

---
updated-dependencies:
- dependency-name: github.com/kovidgoyal/imaging
  dependency-version: 1.8.17
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Committed by Ralf Haferkamp. Parent: a1862a65d9. Commit: 85361fca67.
vendor/github.com/kovidgoyal/go-parallel/parallel.go (generated, vendored): 74 changed lines
@@ -7,6 +7,7 @@ import (
    "slices"
    "strings"
    "sync"
    "sync/atomic"
)

var _ = fmt.Print
@@ -99,16 +100,17 @@ func Run_in_parallel_over_range(num_procs int, f func(int, int), start, limit in
        f(start, limit)
        return
    }
    err_once := sync.Once{}
    chunk_sz := max(1, num_items/num_procs)
    var wg sync.WaitGroup
    echan := make(chan error, num_items/chunk_sz+1)
    for start < limit {
        end := min(start+chunk_sz, limit)
        wg.Add(1)
        go func(start, end int) {
            defer func() {
                if r := recover(); r != nil {
                    echan <- Format_stacktrace_on_panic(r, 1)
                    perr := Format_stacktrace_on_panic(r, 1)
                    err_once.Do(func() { err = perr })
                }
                wg.Done()
            }()
@@ -117,10 +119,6 @@ func Run_in_parallel_over_range(num_procs int, f func(int, int), start, limit in
        start = end
    }
    wg.Wait()
    close(echan)
    for qerr := range echan {
        return qerr
    }
    return
}
@@ -145,27 +143,77 @@ func Run_in_parallel_over_range_with_error(num_procs int, f func(int, int) error
    }
    chunk_sz := max(1, num_items/num_procs)
    var wg sync.WaitGroup
    echan := make(chan error, num_items/chunk_sz+1)
    err_once := sync.Once{}
    for start < limit {
        end := min(start+chunk_sz, limit)
        wg.Add(1)
        go func(start, end int) {
            defer func() {
                if r := recover(); r != nil {
                    echan <- Format_stacktrace_on_panic(r, 1)
                    perr := Format_stacktrace_on_panic(r, 1)
                    err_once.Do(func() { err = perr })
                }
                wg.Done()
            }()
            if cerr := f(start, end); cerr != nil {
                echan <- cerr
                err_once.Do(func() { err = cerr })
            }
        }(start, end)
        start = end
    }
    wg.Wait()
    close(echan)
    for qerr := range echan {
        return qerr
    }
    return
}

// Run the specified function in parallel until one of them returns true.
// The functions are passed a keep_going variable that they should periodically check and return false if it is false.
// If any of the functions panic, the panic is turned into a regular error and returned.
func Run_in_parallel_to_first_result(num_procs int, f func(start, limit int, keep_going *atomic.Bool) bool, start, limit int) (err error) {
    var keep_going atomic.Bool
    keep_going.Store(true)
    num_items := limit - start
    if num_procs <= 0 {
        num_procs = runtime.GOMAXPROCS(0)
    }
    num_procs = max(1, min(num_procs, num_items))
    if num_procs < 2 {
        defer func() {
            if r := recover(); r != nil {
                err = Format_stacktrace_on_panic(r, 1)
            }
        }()
        f(start, limit, &keep_going)
        return
    }
    chunk_sz := max(1, num_items/num_procs)
    var wg sync.WaitGroup
    var err_once sync.Once
    ch := make(chan bool, num_items/chunk_sz+1)
    for start < limit {
        end := min(start+chunk_sz, limit)
        wg.Add(1)
        go func(start, end int) {
            defer func() {
                if r := recover(); r != nil {
                    perr := Format_stacktrace_on_panic(r, 1)
                    err_once.Do(func() { err = perr })
                }
                wg.Done()
            }()
            ch <- f(start, end, &keep_going)
        }(start, end)
        start = end
    }
    go func() {
        wg.Wait()
        close(ch)
    }()
    for x := range ch {
        if x {
            break
        }
    }
    keep_going.Store(false)
    wg.Wait()
    return
}
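Run_in_parallel_to_first_result, added in the hunk above, splits [start, limit) into chunks, runs the callback on each chunk in its own goroutine, and tells the remaining workers to stop as soon as any callback returns true. A minimal usage sketch (not from the upstream repository); the parallel import alias and the haystack/needle data are assumptions for illustration.

```go
package main

import (
    "fmt"
    "sync/atomic"

    parallel "github.com/kovidgoyal/go-parallel"
)

func main() {
    haystack := []int{4, 8, 15, 16, 23, 42} // hypothetical data
    needle := 23
    var found atomic.Int64
    found.Store(-1)

    // num_procs == 0 means "use runtime.GOMAXPROCS(0)" per the code above.
    err := parallel.Run_in_parallel_to_first_result(0, func(start, limit int, keep_going *atomic.Bool) bool {
        for i := start; i < limit && keep_going.Load(); i++ {
            if haystack[i] == needle {
                found.Store(int64(i))
                return true // tells the other workers to stop
            }
        }
        return false
    }, 0, len(haystack))
    if err != nil {
        panic(err) // a worker panicked; the panic was converted into an error
    }
    fmt.Println("found at index:", found.Load()) // -1 if not found
}
```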
vendor/github.com/kovidgoyal/go-parallel/publish.py (generated, vendored): 2 changed lines
@@ -5,7 +5,7 @@ import os
import subprocess


VERSION = '1.0.1'
VERSION = '1.1.1'


def run(*args: str):
vendor/github.com/kovidgoyal/go-shm/.gitignore (generated, vendored, new file): 1 line
@@ -0,0 +1 @@
dist
vendor/github.com/kovidgoyal/go-shm/.goreleaser.yaml (generated, vendored, new file): 2 lines
@@ -0,0 +1,2 @@
builds:
- skip: true
vendor/github.com/kovidgoyal/go-shm/LICENSE (generated, vendored, new file): 28 lines
@@ -0,0 +1,28 @@
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2025, Kovid Goyal
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
vendor/github.com/kovidgoyal/go-shm/README.md (generated, vendored, new file): 3 lines
@@ -0,0 +1,3 @@
Tools to create and manage shared memory (POSIX Shared memory) across all Unix
variants. Pure Go, no external dependencies. Implements Go versions of
shm_open() and shm_unlink() that interoperate with the libc versions.
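A minimal sketch of how the API added by this vendored package fits together; CreateTemp, Open and the MMap methods used here are defined in shm.go, shm_fs.go and shm_syscall.go below. The shm import alias, the pattern and the sizes are assumptions for illustration.

```go
package main

import (
    "fmt"

    shm "github.com/kovidgoyal/go-shm"
)

func main() {
    // Create a shared memory region; the "*" in the pattern is replaced
    // by a random filename component (see RandomFilename in utils.go).
    m, err := shm.CreateTemp("example-*", 4096)
    if err != nil {
        panic(err)
    }
    defer m.Close()
    defer m.Unlink()

    copy(m.Slice(), []byte("hello from shm"))

    // Another process (or this one) can map the same region by name;
    // size 0 means "use the size reported by Stat()".
    other, err := shm.Open(m.Name(), 0)
    if err != nil {
        panic(err)
    }
    defer other.Close()
    fmt.Println(string(other.Slice()[:14]))
}
```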
vendor/github.com/kovidgoyal/go-shm/fallocate_linux.go (generated, vendored, new file): 20 lines
@@ -0,0 +1,20 @@
|
||||
// License: GPLv3 Copyright: 2023, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func Fallocate_simple(fd int, size int64) (err error) {
|
||||
for {
|
||||
if err = unix.Fallocate(fd, 0, 0, size); !errors.Is(err, unix.EINTR) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
vendor/github.com/kovidgoyal/go-shm/fallocate_other.go (generated, vendored, new file): 16 lines
@@ -0,0 +1,16 @@
|
||||
// License: GPLv3 Copyright: 2023, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
//go:build !linux
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func Fallocate_simple(fd int, size int64) (err error) {
|
||||
return errors.ErrUnsupported
|
||||
}
|
||||
vendor/github.com/kovidgoyal/go-shm/publish.py (generated, vendored, new file): 31 lines
@@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env python
|
||||
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
|
||||
VERSION = '1.0.0'
|
||||
|
||||
|
||||
def run(*args: str):
|
||||
cp = subprocess.run(args)
|
||||
if cp.returncode != 0:
|
||||
raise SystemExit(cp.returncode)
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
ans = input(f'Publish version \033[91m{VERSION}\033[m (y/n): ')
|
||||
except KeyboardInterrupt:
|
||||
ans = 'n'
|
||||
if ans.lower() != 'y':
|
||||
return
|
||||
os.environ['GITHUB_TOKEN'] = open(os.path.join(os.environ['PENV'], 'github-token')).read().strip().partition(':')[2]
|
||||
run('git', 'tag', '-a', 'v' + VERSION, '-m', f'version {VERSION}')
|
||||
run('git', 'push')
|
||||
run('goreleaser', 'release', '--clean')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
vendor/github.com/kovidgoyal/go-shm/shm.go (generated, vendored, new file): 208 lines
@@ -0,0 +1,208 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
var ErrPatternHasSeparator = errors.New("The specified pattern has file path separators in it")
|
||||
var ErrPatternTooLong = errors.New("The specified pattern for the SHM name is too long")
|
||||
|
||||
type ErrNotSupported struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (self *ErrNotSupported) Error() string {
|
||||
return fmt.Sprintf("POSIX shared memory not supported on this platform: with underlying error: %v", self.err)
|
||||
}
|
||||
|
||||
// prefix_and_suffix splits pattern by the last wildcard "*", if applicable,
|
||||
// returning prefix as the part before "*" and suffix as the part after "*".
|
||||
func prefix_and_suffix(pattern string) (prefix, suffix string, err error) {
|
||||
for i := 0; i < len(pattern); i++ {
|
||||
if os.IsPathSeparator(pattern[i]) {
|
||||
return "", "", ErrPatternHasSeparator
|
||||
}
|
||||
}
|
||||
if pos := strings.LastIndexByte(pattern, '*'); pos != -1 {
|
||||
prefix, suffix = pattern[:pos], pattern[pos+1:]
|
||||
} else {
|
||||
prefix = pattern
|
||||
}
|
||||
return prefix, suffix, nil
|
||||
}
|
||||
|
||||
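For example, calling prefix_and_suffix with a hypothetical pattern "img-*.tmp" yields prefix "img-" and suffix ".tmp"; a pattern without "*" becomes all prefix with an empty suffix, and a pattern containing a path separator is rejected with ErrPatternHasSeparator.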
type MMap interface {
|
||||
Close() error
|
||||
Unlink() error
|
||||
Slice() []byte
|
||||
Name() string
|
||||
IsFileSystemBacked() bool
|
||||
FileSystemName() string
|
||||
Stat() (fs.FileInfo, error)
|
||||
Flush() error
|
||||
Seek(offset int64, whence int) (ret int64, err error)
|
||||
Read(b []byte) (n int, err error)
|
||||
Write(b []byte) (n int, err error)
|
||||
}
|
||||
|
||||
type AccessFlags int
|
||||
|
||||
const (
|
||||
READ AccessFlags = iota
|
||||
WRITE
|
||||
COPY
|
||||
)
|
||||
|
||||
func mmap(sz int, access AccessFlags, fd int, off int64) ([]byte, error) {
|
||||
flags := unix.MAP_SHARED
|
||||
prot := unix.PROT_READ
|
||||
switch access {
|
||||
case COPY:
|
||||
prot |= unix.PROT_WRITE
|
||||
flags = unix.MAP_PRIVATE
|
||||
case WRITE:
|
||||
prot |= unix.PROT_WRITE
|
||||
}
|
||||
|
||||
b, err := unix.Mmap(fd, off, sz, prot, flags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func munmap(s []byte) error {
|
||||
return unix.Munmap(s)
|
||||
}
|
||||
|
||||
func CreateTemp(pattern string, size uint64) (MMap, error) {
|
||||
return create_temp(pattern, size)
|
||||
}
|
||||
|
||||
func truncate_or_unlink(ans *os.File, size uint64, unlink func(string) error) (err error) {
|
||||
fd := int(ans.Fd())
|
||||
sz := int64(size)
|
||||
if err = Fallocate_simple(fd, sz); err != nil {
|
||||
if !errors.Is(err, errors.ErrUnsupported) {
|
||||
return fmt.Errorf("fallocate() failed on fd from shm_open(%s) with size: %d with error: %w", ans.Name(), size, err)
|
||||
}
|
||||
for {
|
||||
if err = unix.Ftruncate(fd, sz); !errors.Is(err, unix.EINTR) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
_ = ans.Close()
|
||||
_ = unlink(ans.Name())
|
||||
return fmt.Errorf("Failed to ftruncate() SHM file %s to size: %d with error: %w", ans.Name(), size, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const NUM_BYTES_FOR_SIZE = 4
|
||||
|
||||
var ErrRegionTooSmall = errors.New("mmaped region too small")
|
||||
|
||||
func WriteWithSize(self MMap, b []byte, at int) error {
|
||||
if len(self.Slice()) < at+len(b)+NUM_BYTES_FOR_SIZE {
|
||||
return ErrRegionTooSmall
|
||||
}
|
||||
binary.BigEndian.PutUint32(self.Slice()[at:], uint32(len(b)))
|
||||
copy(self.Slice()[at+NUM_BYTES_FOR_SIZE:], b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReadWithSize(self MMap, at int) ([]byte, error) {
|
||||
s := self.Slice()[at:]
|
||||
if len(s) < NUM_BYTES_FOR_SIZE {
|
||||
return nil, ErrRegionTooSmall
|
||||
}
|
||||
size := int(binary.BigEndian.Uint32(self.Slice()[at : at+NUM_BYTES_FOR_SIZE]))
|
||||
s = s[NUM_BYTES_FOR_SIZE:]
|
||||
if len(s) < size {
|
||||
return nil, ErrRegionTooSmall
|
||||
}
|
||||
return s[:size], nil
|
||||
}
|
||||
|
||||
func ReadWithSizeAndUnlink(name string, file_callback ...func(fs.FileInfo) error) ([]byte, error) {
|
||||
mmap, err := Open(name, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(file_callback) > 0 {
|
||||
s, err := mmap.Stat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to stat SHM file with error: %w", err)
|
||||
}
|
||||
for _, f := range file_callback {
|
||||
err = f(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
mmap.Close()
|
||||
_ = mmap.Unlink()
|
||||
}()
|
||||
slice, err := ReadWithSize(mmap, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans := make([]byte, len(slice))
|
||||
copy(ans, slice)
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
func Read(self MMap, b []byte) (n int, err error) {
|
||||
pos, err := self.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if pos < 0 {
|
||||
pos = 0
|
||||
}
|
||||
s := self.Slice()
|
||||
sz := int64(len(s))
|
||||
if pos >= sz {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n = copy(b, s[pos:])
|
||||
_, err = self.Seek(int64(n), io.SeekCurrent)
|
||||
return
|
||||
}
|
||||
|
||||
func Write(self MMap, b []byte) (n int, err error) {
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
pos, _ := self.Seek(0, io.SeekCurrent)
|
||||
if pos < 0 {
|
||||
pos = 0
|
||||
}
|
||||
s := self.Slice()
|
||||
if pos >= int64(len(s)) {
|
||||
return 0, io.ErrShortWrite
|
||||
}
|
||||
n = copy(s[pos:], b)
|
||||
if _, err = self.Seek(int64(n), io.SeekCurrent); err != nil {
|
||||
return n, err
|
||||
}
|
||||
if n < len(b) {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
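WriteWithSize and ReadWithSize above store a payload behind a NUM_BYTES_FOR_SIZE (4-byte) big-endian length prefix inside the mapped region. A self-contained sketch of that layout; as before, the shm import alias and the pattern/size are assumptions for illustration.

```go
package main

import (
    "fmt"

    shm "github.com/kovidgoyal/go-shm"
)

func main() {
    m, err := shm.CreateTemp("lenprefix-*", 64)
    if err != nil {
        panic(err)
    }
    defer m.Close()
    defer m.Unlink()

    // The region now begins with 00 00 00 05 'h' 'e' 'l' 'l' 'o':
    // a 4-byte big-endian length followed by the payload.
    if err := shm.WriteWithSize(m, []byte("hello"), 0); err != nil {
        panic(err)
    }
    payload, err := shm.ReadWithSize(m, 0)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(payload)) // hello
}
```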
vendor/github.com/kovidgoyal/go-shm/shm_fs.go (generated, vendored, new file): 187 lines
@@ -0,0 +1,187 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
//go:build linux || netbsd || openbsd || dragonfly
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type file_based_mmap struct {
|
||||
f *os.File
|
||||
pos int64
|
||||
region []byte
|
||||
unlinked bool
|
||||
special_name string
|
||||
}
|
||||
|
||||
func ShmUnlink(name string) error {
|
||||
if runtime.GOOS == "openbsd" {
|
||||
return os.Remove(openbsd_shm_path(name))
|
||||
}
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
return os.Remove(filepath.Join(SHM_DIR, name))
|
||||
}
|
||||
|
||||
func file_mmap(f *os.File, size uint64, access AccessFlags, truncate bool, special_name string) (MMap, error) {
|
||||
if truncate {
|
||||
err := truncate_or_unlink(f, size, os.Remove)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
region, err := mmap(int(size), access, int(f.Fd()), 0)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
os.Remove(f.Name())
|
||||
return nil, err
|
||||
}
|
||||
return &file_based_mmap{f: f, region: region, special_name: special_name}, nil
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Seek(offset int64, whence int) (ret int64, err error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
self.pos = offset
|
||||
case io.SeekEnd:
|
||||
self.pos = int64(len(self.region)) + offset
|
||||
case io.SeekCurrent:
|
||||
self.pos += offset
|
||||
}
|
||||
return self.pos, nil
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Read(b []byte) (n int, err error) {
|
||||
return Read(self, b)
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Write(b []byte) (n int, err error) {
|
||||
return Write(self, b)
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Stat() (fs.FileInfo, error) {
|
||||
return self.f.Stat()
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Name() string {
|
||||
if self.special_name != "" {
|
||||
return self.special_name
|
||||
}
|
||||
return filepath.Base(self.f.Name())
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Flush() error {
|
||||
return unix.Msync(self.region, unix.MS_SYNC)
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) FileSystemName() string {
|
||||
return self.f.Name()
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Slice() []byte {
|
||||
return self.region
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Close() (err error) {
|
||||
if self.region != nil {
|
||||
self.f.Close()
|
||||
err = munmap(self.region)
|
||||
self.region = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) Unlink() (err error) {
|
||||
if self.unlinked {
|
||||
return nil
|
||||
}
|
||||
self.unlinked = true
|
||||
return os.Remove(self.f.Name())
|
||||
}
|
||||
|
||||
func (self *file_based_mmap) IsFileSystemBacked() bool { return true }
|
||||
|
||||
func openbsd_shm_path(name string) string {
|
||||
hash := sha256.Sum256(UnsafeStringToBytes(name))
|
||||
return filepath.Join(SHM_DIR, UnsafeBytesToString(hash[:])+".shm")
|
||||
}
|
||||
|
||||
func file_path_from_name(name string) string {
|
||||
// See https://github.com/openbsd/src/blob/master/lib/libc/gen/shm_open.c
|
||||
if runtime.GOOS == "openbsd" {
|
||||
return openbsd_shm_path(name)
|
||||
}
|
||||
return filepath.Join(SHM_DIR, name)
|
||||
}
|
||||
|
||||
func create_temp(pattern string, size uint64) (ans MMap, err error) {
|
||||
special_name := ""
|
||||
var prefix, suffix string
|
||||
prefix, suffix, err = prefix_and_suffix(pattern)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var f *os.File
|
||||
try := 0
|
||||
for {
|
||||
name := prefix + RandomFilename() + suffix
|
||||
path := file_path_from_name(name)
|
||||
f, err = os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrExist) {
|
||||
try += 1
|
||||
if try > 10000 {
|
||||
return nil, &os.PathError{Op: "createtemp", Path: prefix + "*" + suffix, Err: fs.ErrExist}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, &ErrNotSupported{err: err}
|
||||
}
|
||||
return
|
||||
}
|
||||
break
|
||||
}
|
||||
return file_mmap(f, size, WRITE, true, special_name)
|
||||
}
|
||||
|
||||
func open(name string) (*os.File, error) {
|
||||
ans, err := os.OpenFile(file_path_from_name(name), os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
if _, serr := os.Stat(SHM_DIR); serr != nil && errors.Is(serr, fs.ErrNotExist) {
|
||||
return nil, &ErrNotSupported{err: serr}
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
func Open(name string, size uint64) (MMap, error) {
|
||||
ans, err := open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if size == 0 {
|
||||
s, err := ans.Stat()
|
||||
if err != nil {
|
||||
ans.Close()
|
||||
return nil, fmt.Errorf("Failed to stat SHM file with error: %w", err)
|
||||
}
|
||||
size = uint64(s.Size())
|
||||
}
|
||||
return file_mmap(ans, size, READ, false, name)
|
||||
}
|
||||
vendor/github.com/kovidgoyal/go-shm/shm_syscall.go (generated, vendored, new file): 198 lines
@@ -0,0 +1,198 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
//go:build darwin || freebsd
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
// ByteSliceFromString makes a zero terminated byte slice from the string
|
||||
func ByteSliceFromString(s string) []byte {
|
||||
a := make([]byte, len(s)+1)
|
||||
copy(a, s)
|
||||
return a
|
||||
}
|
||||
|
||||
func BytePtrFromString(s string) *byte {
|
||||
a := ByteSliceFromString(s)
|
||||
return &a[0]
|
||||
}
|
||||
|
||||
func shm_unlink(name string) (err error) {
|
||||
bname := BytePtrFromString(name)
|
||||
for {
|
||||
_, _, errno := unix.Syscall(unix.SYS_SHM_UNLINK, uintptr(unsafe.Pointer(bname)), 0, 0)
|
||||
if errno != unix.EINTR {
|
||||
if errno != 0 {
|
||||
if errno == unix.ENOENT {
|
||||
err = fs.ErrNotExist
|
||||
} else {
|
||||
err = fmt.Errorf("shm_unlink() failed with error: %w", errno)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ShmUnlink(name string) error {
|
||||
return shm_unlink(name)
|
||||
}
|
||||
|
||||
func shm_open(name string, flags, perm int) (ans *os.File, err error) {
|
||||
bname := BytePtrFromString(name)
|
||||
var fd uintptr
|
||||
var errno unix.Errno
|
||||
for {
|
||||
fd, _, errno = unix.Syscall(unix.SYS_SHM_OPEN, uintptr(unsafe.Pointer(bname)), uintptr(flags), uintptr(perm))
|
||||
if errno != unix.EINTR {
|
||||
if errno != 0 {
|
||||
err = fmt.Errorf("shm_open() failed with error: %w", errno)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
ans = os.NewFile(fd, name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type syscall_based_mmap struct {
|
||||
f *os.File
|
||||
pos int64
|
||||
region []byte
|
||||
unlinked bool
|
||||
}
|
||||
|
||||
func syscall_mmap(f *os.File, size uint64, access AccessFlags, truncate bool) (MMap, error) {
|
||||
if truncate {
|
||||
err := truncate_or_unlink(f, size, shm_unlink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("truncate failed with error: %w", err)
|
||||
}
|
||||
}
|
||||
region, err := mmap(int(size), access, int(f.Fd()), 0)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
_ = shm_unlink(f.Name())
|
||||
return nil, fmt.Errorf("mmap failed with error: %w", err)
|
||||
}
|
||||
return &syscall_based_mmap{f: f, region: region}, nil
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Name() string {
|
||||
return self.f.Name()
|
||||
}
|
||||
func (self *syscall_based_mmap) Stat() (fs.FileInfo, error) {
|
||||
return self.f.Stat()
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Flush() error {
|
||||
return unix.Msync(self.region, unix.MS_SYNC)
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Slice() []byte {
|
||||
return self.region
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Close() (err error) {
|
||||
if self.region != nil {
|
||||
self.f.Close()
|
||||
munmap(self.region)
|
||||
self.region = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Unlink() (err error) {
|
||||
if self.unlinked {
|
||||
return nil
|
||||
}
|
||||
self.unlinked = true
|
||||
return shm_unlink(self.Name())
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Seek(offset int64, whence int) (ret int64, err error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
self.pos = offset
|
||||
case io.SeekEnd:
|
||||
self.pos = int64(len(self.region)) + offset
|
||||
case io.SeekCurrent:
|
||||
self.pos += offset
|
||||
}
|
||||
return self.pos, nil
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Read(b []byte) (n int, err error) {
|
||||
return Read(self, b)
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) Write(b []byte) (n int, err error) {
|
||||
return Write(self, b)
|
||||
}
|
||||
|
||||
func (self *syscall_based_mmap) IsFileSystemBacked() bool { return false }
|
||||
func (self *syscall_based_mmap) FileSystemName() string { return "" }
|
||||
|
||||
func create_temp(pattern string, size uint64) (ans MMap, err error) {
|
||||
var prefix, suffix string
|
||||
prefix, suffix, err = prefix_and_suffix(pattern)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if SHM_REQUIRED_PREFIX != "" && !strings.HasPrefix(pattern, SHM_REQUIRED_PREFIX) {
|
||||
// FreeBSD requires name to start with /
|
||||
prefix = SHM_REQUIRED_PREFIX + prefix
|
||||
}
|
||||
var f *os.File
|
||||
try := 0
|
||||
for {
|
||||
name := prefix + RandomFilename() + suffix
|
||||
if len(name) > SHM_NAME_MAX {
|
||||
return nil, ErrPatternTooLong
|
||||
}
|
||||
f, err = shm_open(name, os.O_EXCL|os.O_CREATE|os.O_RDWR, 0600)
|
||||
if err != nil && (errors.Is(err, fs.ErrExist) || errors.Unwrap(err) == unix.EEXIST) {
|
||||
try += 1
|
||||
if try > 10000 {
|
||||
return nil, &os.PathError{Op: "createtemp", Path: prefix + "*" + suffix, Err: fs.ErrExist}
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return syscall_mmap(f, size, WRITE, true)
|
||||
}
|
||||
|
||||
func Open(name string, size uint64) (MMap, error) {
|
||||
ans, err := shm_open(name, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if size == 0 {
|
||||
s, err := ans.Stat()
|
||||
if err != nil {
|
||||
ans.Close()
|
||||
return nil, fmt.Errorf("Failed to stat SHM file with error: %w", err)
|
||||
}
|
||||
size = uint64(s.Size())
|
||||
}
|
||||
return syscall_mmap(ans, size, READ, false)
|
||||
}
|
||||
vendor/github.com/kovidgoyal/go-shm/specific_darwin.go (generated, vendored, new file): 7 lines
@@ -0,0 +1,7 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
const SHM_NAME_MAX = 30
|
||||
const SHM_REQUIRED_PREFIX = ""
|
||||
const SHM_DIR = ""
|
||||
vendor/github.com/kovidgoyal/go-shm/specific_dragonfly.go (generated, vendored, new file): 12 lines
@@ -0,0 +1,12 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
// https://www.dragonflybsd.org/cgi/web-man?command=shm_open§ion=3
|
||||
const SHM_DIR = "/var/run/shm"
|
||||
vendor/github.com/kovidgoyal/go-shm/specific_freebsd.go (generated, vendored, new file): 7 lines
@@ -0,0 +1,7 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
const SHM_NAME_MAX = 1023
|
||||
const SHM_REQUIRED_PREFIX = "/"
|
||||
const SHM_DIR = ""
|
||||
vendor/github.com/kovidgoyal/go-shm/specific_linux.go (generated, vendored, new file): 11 lines
@@ -0,0 +1,11 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
const SHM_DIR = "/dev/shm"
|
||||
vendor/github.com/kovidgoyal/go-shm/specific_netbsd.go (generated, vendored, new file): 11 lines
@@ -0,0 +1,11 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
const SHM_DIR = "/var/shm"
|
||||
vendor/github.com/kovidgoyal/go-shm/specific_openbsd.go (generated, vendored, new file): 11 lines
@@ -0,0 +1,11 @@
|
||||
// License: GPLv3 Copyright: 2022, Kovid Goyal, <kovid at kovidgoyal.net>
|
||||
|
||||
package shm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
const SHM_DIR = "/tmp"
|
||||
vendor/github.com/kovidgoyal/go-shm/utils.go (generated, vendored, new file): 35 lines
@@ -0,0 +1,35 @@
|
||||
package shm
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base32"
|
||||
"fmt"
|
||||
not_rand "math/rand/v2"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func RandomFilename() string {
|
||||
b := []byte{0, 0, 0, 0, 0, 0, 0, 0}
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return strconv.FormatUint(uint64(not_rand.Uint32()), 16)
|
||||
}
|
||||
return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(b)
|
||||
}
|
||||
|
||||
// Unsafely converts s into a byte slice.
|
||||
// If you modify b, then s will also be modified. This violates the
|
||||
// property that strings are immutable.
|
||||
func UnsafeStringToBytes(s string) (b []byte) {
|
||||
return unsafe.Slice(unsafe.StringData(s), len(s))
|
||||
}
|
||||
|
||||
// Unsafely converts b into a string.
|
||||
// If you modify b, then s will also be modified. This violates the
|
||||
// property that strings are immutable.
|
||||
func UnsafeBytesToString(b []byte) (s string) {
|
||||
return unsafe.String(unsafe.SliceData(b), len(b))
|
||||
}
|
||||
vendor/github.com/kovidgoyal/imaging/.gitignore (generated, vendored): 3 changed lines
@@ -1,2 +1,3 @@
lcms/
dist/
prism.test
vendor/github.com/kovidgoyal/imaging/.nvim.lsp.lua (generated, vendored, new file): 16 lines
@@ -0,0 +1,16 @@
|
||||
#!/usr/bin/env lua
|
||||
|
||||
local lspconfig = require('lspconfig')
|
||||
|
||||
-- Get the existing gopls configuration to merge with it
|
||||
local gopls_opts = lspconfig.gopls.get_default_options()
|
||||
|
||||
-- Merge the project-specific settings
|
||||
lspconfig.gopls.setup({
|
||||
-- Extend or override settings from your global config
|
||||
settings = vim.tbl_deep_extend("force", gopls_opts.settings or {}, {
|
||||
gopls = {
|
||||
buildFlags = { "-tags=lcms2cgo" },
|
||||
},
|
||||
}),
|
||||
})
|
||||
vendor/github.com/kovidgoyal/imaging/README.md (generated, vendored): 145 changed lines
@@ -1,9 +1,26 @@
|
||||
# Imaging
|
||||
|
||||
Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).
|
||||
This is pure Go code that makes working with images actually
|
||||
usable on top of the Go stdlib. In addition to the usual PNG/JPEG/WebP/TIFF/BMP/GIF
|
||||
formats that have been supported forever, this package adds support for
|
||||
animated PNG, animated WebP, Google's new "jpegli" JPEG variant
|
||||
and all the netPBM image formats.
|
||||
|
||||
All the image processing functions provided by the package accept any image type that implements `image.Image` interface
|
||||
as an input, and return a new image of `*image.NRGBA` type (32bit RGBA colors, non-premultiplied alpha).
|
||||
Additionally, this package supports color management via ICC profiles and CICP
|
||||
metadata. Opening non-sRGB images automatically converts them to sRGB, so you
|
||||
don't have to think about it. It has full support for ICC v2 and v4 profiles
|
||||
embedded in all the image formats and is extensively tested against the
|
||||
little-cms library.
|
||||
|
||||
It also supports loading image metadata in EXIF format and automatically
|
||||
supports the EXIF orientation flag -- on image load the image is transformed
|
||||
based on that tag automatically.
|
||||
|
||||
It automatically falls back to ImageMagick when available, for image formats
|
||||
it does not support.
|
||||
|
||||
Finally, it provides basic image processing functions
|
||||
(resize, rotate, crop, brightness/contrast adjustments, etc.).
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -13,24 +30,21 @@ as an input, and return a new image of `*image.NRGBA` type (32bit RGBA colors, n
|
||||
|
||||
https://pkg.go.dev/github.com/kovidgoyal/imaging
|
||||
|
||||
## Usage examples
|
||||
|
||||
A few usage examples can be found below. See the documentation for the full list of supported functions.
|
||||
|
||||
### Image resizing
|
||||
## Quickstart
|
||||
|
||||
```go
|
||||
// Resize srcImage to size = 128x128px using the Lanczos filter.
|
||||
dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos)
|
||||
img, metadata, err := imaging.OpenAll(path, options...)
|
||||
img.Resize(128, 128, imaging.Lanczos)
|
||||
img.SaveAsPNG(path, mode)
|
||||
```
|
||||
|
||||
// Resize srcImage to width = 800px preserving the aspect ratio.
|
||||
dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos)
|
||||
There are also convenience scripts that demonstrate this library in action;
|
||||
note that these are mainly for development and as such they only use the pure
|
||||
Go code and do not fall back to ImageMagick:
|
||||
|
||||
// Scale down srcImage to fit the 800x600px bounding box.
|
||||
dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
|
||||
|
||||
// Resize and crop the srcImage to fill the 100x100px area.
|
||||
dstImageFill := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos)
|
||||
```sh
|
||||
./to-png some-image.whatever some-image.png
|
||||
./to-frames some-animated-image.whatever some-animated-image.apng
|
||||
```
|
||||
|
||||
Imaging supports image resizing using various resampling filters. The most notable ones:
|
||||
@@ -134,98 +148,9 @@ Original image | Hue = 60
|
||||
-----------------------------------|----------------------------------------------|---------------------------------------------
|
||||
 |  | 
|
||||
|
||||
## FAQ
|
||||
|
||||
### Incorrect image orientation after processing (e.g. an image appears rotated after resizing)
|
||||
## Acknowledgements
|
||||
|
||||
Most probably, the given image contains the EXIF orientation tag.
|
||||
The standard `image/*` packages do not support loading and saving
|
||||
this kind of information. To fix the issue, try opening images with
|
||||
the `AutoOrientation` decode option. If this option is set to `true`,
|
||||
the image orientation is changed after decoding, according to the
|
||||
orientation tag (if present). Here's the example:
|
||||
|
||||
```go
|
||||
img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
|
||||
```
|
||||
|
||||
### What's the difference between `imaging` and `gift` packages?
|
||||
|
||||
[imaging](https://github.com/kovidgoyal/imaging)
|
||||
is designed to be a lightweight and simple image manipulation package.
|
||||
It provides basic image processing functions and a few helper functions
|
||||
such as `Open` and `Save`. It consistently returns *image.NRGBA image
|
||||
type (8 bits per channel, RGBA).
|
||||
|
||||
[gift](https://github.com/disintegration/gift)
|
||||
supports more advanced image processing, for example, sRGB/Linear color
|
||||
space conversions. It also supports different output image types
|
||||
(e.g. 16 bits per channel) and provides easy-to-use API for chaining
|
||||
multiple processing steps together.
|
||||
|
||||
## Example code
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"image"
|
||||
"image/color"
|
||||
"log"
|
||||
|
||||
"github.com/kovidgoyal/imaging"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Open a test image.
|
||||
src, err := imaging.Open("testdata/flowers.png")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to open image: %v", err)
|
||||
}
|
||||
|
||||
// Crop the original image to 300x300px size using the center anchor.
|
||||
src = imaging.CropAnchor(src, 300, 300, imaging.Center)
|
||||
|
||||
// Resize the cropped image to width = 200px preserving the aspect ratio.
|
||||
src = imaging.Resize(src, 200, 0, imaging.Lanczos)
|
||||
|
||||
// Create a blurred version of the image.
|
||||
img1 := imaging.Blur(src, 5)
|
||||
|
||||
// Create a grayscale version of the image with higher contrast and sharpness.
|
||||
img2 := imaging.Grayscale(src)
|
||||
img2 = imaging.AdjustContrast(img2, 20)
|
||||
img2 = imaging.Sharpen(img2, 2)
|
||||
|
||||
// Create an inverted version of the image.
|
||||
img3 := imaging.Invert(src)
|
||||
|
||||
// Create an embossed version of the image using a convolution filter.
|
||||
img4 := imaging.Convolve3x3(
|
||||
src,
|
||||
[9]float64{
|
||||
-1, -1, 0,
|
||||
-1, 1, 1,
|
||||
0, 1, 1,
|
||||
},
|
||||
nil,
|
||||
)
|
||||
|
||||
// Create a new image and paste the four produced images into it.
|
||||
dst := imaging.New(400, 400, color.NRGBA{0, 0, 0, 0})
|
||||
dst = imaging.Paste(dst, img1, image.Pt(0, 0))
|
||||
dst = imaging.Paste(dst, img2, image.Pt(0, 200))
|
||||
dst = imaging.Paste(dst, img3, image.Pt(200, 0))
|
||||
dst = imaging.Paste(dst, img4, image.Pt(200, 200))
|
||||
|
||||
// Save the resulting image as JPEG.
|
||||
err = imaging.Save(dst, "testdata/out_example.jpg")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to save image: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Output:
|
||||
|
||||

|
||||
This is a fork of the unmaintained distraction/imaging project. The color
management code started out from mandykoh/prism and used some code from
go-andiamo/iccarus, but it was almost completely rewritten from scratch.
|
||||
|
||||
vendor/github.com/kovidgoyal/imaging/adjust.go (generated, vendored): 52 changed lines
@@ -4,17 +4,20 @@ import (
|
||||
"image"
|
||||
"image/color"
|
||||
"math"
|
||||
|
||||
"github.com/kovidgoyal/imaging/nrgba"
|
||||
)
|
||||
|
||||
// Grayscale produces a grayscale version of the image.
|
||||
func Grayscale(img image.Image) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
i := y * dst.Stride
|
||||
src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
|
||||
for x := 0; x < src.w; x++ {
|
||||
src.Scan(0, y, w, y+1, dst.Pix[i:i+w*4])
|
||||
for range w {
|
||||
d := dst.Pix[i : i+3 : i+3]
|
||||
r := d[0]
|
||||
g := d[1]
|
||||
@@ -27,7 +30,7 @@ func Grayscale(img image.Image) *image.NRGBA {
|
||||
i += 4
|
||||
}
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
@@ -35,13 +38,14 @@ func Grayscale(img image.Image) *image.NRGBA {
|
||||
|
||||
// Invert produces an inverted (negated) version of the image.
|
||||
func Invert(img image.Image) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
i := y * dst.Stride
|
||||
src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
|
||||
for x := 0; x < src.w; x++ {
|
||||
src.Scan(0, y, w, y+1, dst.Pix[i:i+w*4])
|
||||
for range w {
|
||||
d := dst.Pix[i : i+3 : i+3]
|
||||
d[0] = 255 - d[0]
|
||||
d[1] = 255 - d[1]
|
||||
@@ -49,7 +53,7 @@ func Invert(img image.Image) *image.NRGBA {
|
||||
i += 4
|
||||
}
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
@@ -129,7 +133,7 @@ func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
|
||||
lut := make([]uint8, 256)
|
||||
|
||||
v := (100.0 + percentage) / 100.0
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
switch {
|
||||
case 0 <= v && v <= 1:
|
||||
lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0)
|
||||
@@ -160,7 +164,7 @@ func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
|
||||
lut := make([]uint8, 256)
|
||||
|
||||
shift := 255.0 * percentage / 100.0
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
lut[i] = clamp(float64(i) + shift)
|
||||
}
|
||||
|
||||
@@ -182,7 +186,7 @@ func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
|
||||
e := 1.0 / math.Max(gamma, 0.0001)
|
||||
lut := make([]uint8, 256)
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0)
|
||||
}
|
||||
|
||||
@@ -236,14 +240,15 @@ func sigmoid(a, b, x float64) float64 {
|
||||
|
||||
// adjustLUT applies the given lookup table to the colors of the image.
|
||||
func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
lut = lut[0:256]
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
i := y * dst.Stride
|
||||
src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
|
||||
for x := 0; x < src.w; x++ {
|
||||
src.Scan(0, y, w, y+1, dst.Pix[i:i+w*4])
|
||||
for range w {
|
||||
d := dst.Pix[i : i+3 : i+3]
|
||||
d[0] = lut[d[0]]
|
||||
d[1] = lut[d[1]]
|
||||
@@ -251,7 +256,7 @@ func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
|
||||
i += 4
|
||||
}
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
@@ -273,13 +278,14 @@ func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
|
||||
// }
|
||||
// )
|
||||
func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
i := y * dst.Stride
|
||||
src.Scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
|
||||
for x := 0; x < src.w; x++ {
|
||||
src.Scan(0, y, w, y+1, dst.Pix[i:i+w*4])
|
||||
for range w {
|
||||
d := dst.Pix[i : i+4 : i+4]
|
||||
r := d[0]
|
||||
g := d[1]
|
||||
@@ -293,7 +299,7 @@ func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGB
|
||||
i += 4
|
||||
}
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
|
||||
vendor/github.com/kovidgoyal/imaging/animation.go (generated, vendored, new file): 483 lines
@@ -0,0 +1,483 @@
|
||||
package imaging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"image"
|
||||
"image/color"
|
||||
"image/draw"
|
||||
"image/gif"
|
||||
"image/png"
|
||||
"io"
|
||||
"io/fs"
|
||||
"math"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/kovidgoyal/imaging/apng"
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/gifmeta"
|
||||
"github.com/kovidgoyal/imaging/webp"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type Frame struct {
|
||||
Number uint // a 1-based frame number
|
||||
TopLeft image.Point // location of top-left of this frame w.r.t top left of first frame
|
||||
Image image.Image `json:"-"` // the actual pixel data
|
||||
Delay time.Duration // the time for which this frame should be visible
|
||||
ComposeOnto uint // the frame number of the frame this frame should be composed onto. 0 means compose onto blank
|
||||
Replace bool // Do a simple pixel replacement rather than a full alpha blend when compositing this frame
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
Frames []*Frame // the actual frames of image data. The first frame is guaranteed to be the size of the image.
|
||||
Metadata *meta.Data // image metadata
|
||||
LoopCount uint // 0 means loop forever, 1 means loop once, ...
|
||||
DefaultImage image.Image `json:"-"` // a "default image" for an animation that is not part of the actual animation
|
||||
}
|
||||
|
||||
func (self *Image) populate_from_apng(p *apng.APNG) {
|
||||
self.LoopCount = p.LoopCount
|
||||
prev_disposal := apng.DISPOSE_OP_BACKGROUND
|
||||
var prev_compose_onto uint
|
||||
for _, f := range p.Frames {
|
||||
if f.IsDefault {
|
||||
self.DefaultImage = f.Image
|
||||
continue
|
||||
}
|
||||
frame := Frame{Number: uint(len(self.Frames) + 1), Image: NormalizeOrigin(f.Image),
|
||||
TopLeft: image.Point{X: f.XOffset, Y: f.YOffset},
|
||||
Replace: f.BlendOp == apng.BLEND_OP_SOURCE,
|
||||
Delay: time.Duration(float64(time.Second) * f.GetDelay())}
|
||||
switch prev_disposal {
|
||||
case apng.DISPOSE_OP_NONE:
|
||||
frame.ComposeOnto = frame.Number - 1
|
||||
case apng.DISPOSE_OP_PREVIOUS:
|
||||
frame.ComposeOnto = uint(prev_compose_onto)
|
||||
}
|
||||
prev_disposal, prev_compose_onto = int(f.DisposeOp), frame.ComposeOnto
|
||||
self.Frames = append(self.Frames, &frame)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Frame) Bounds() image.Rectangle {
|
||||
return f.Image.Bounds().Add(f.TopLeft)
|
||||
}
|
||||
|
||||
func (f *Frame) ColorModel() color.Model {
|
||||
return f.Image.ColorModel()
|
||||
}
|
||||
|
||||
func (f *Frame) At(x, y int) color.Color {
|
||||
return f.Image.At(x-f.TopLeft.X, y-f.TopLeft.Y)
|
||||
}
|
||||
|
||||
func (f *Frame) Dx() int { return f.Image.Bounds().Dx() }
|
||||
func (f *Frame) Dy() int { return f.Image.Bounds().Dy() }
|
||||
|
||||
type canvas_t = image.NRGBA
|
||||
|
||||
var new_canvas = image.NewNRGBA
|
||||
|
||||
func (self *Image) populate_from_webp(p *webp.AnimatedWEBP) {
|
||||
// See https://developers.google.com/speed/webp/docs/riff_container#animation
|
||||
self.LoopCount = uint(p.Header.LoopCount)
|
||||
bgcol := p.Header.BackgroundColor
|
||||
// For some reason web viewers treat bgcol as full transparent. Sigh.
|
||||
bgcol = image.Transparent
|
||||
bg := image.NewUniform(bgcol)
|
||||
_, _, _, a := bg.RGBA()
|
||||
bg_is_fully_transparent := a == 0
|
||||
w, h := int(self.Metadata.PixelWidth), int(self.Metadata.PixelHeight)
|
||||
var dispose_prev bool
|
||||
for i, f := range p.Frames {
|
||||
frame := Frame{
|
||||
Number: uint(len(self.Frames) + 1), Image: NormalizeOrigin(f.Frame),
|
||||
TopLeft: image.Point{X: 2 * int(f.Header.FrameX), Y: 2 * int(f.Header.FrameY)},
|
||||
Replace: !f.Header.AlphaBlend,
|
||||
Delay: time.Millisecond * time.Duration(f.Header.FrameDuration),
|
||||
}
|
||||
// we want the first frame to have the same size as the canvas, which
|
||||
// is not always true in WebP
|
||||
if i == 0 && (frame.Dx() < w || frame.Dy() < h || frame.TopLeft != image.Point{}) {
|
||||
img := new_canvas(image.Rect(0, 0, w, h))
|
||||
dest := image.Rectangle{frame.TopLeft, frame.TopLeft.Add(image.Pt(frame.Bounds().Dx(), frame.Bounds().Dy()))}
|
||||
if !bg_is_fully_transparent {
|
||||
draw.Draw(img, img.Bounds(), bg, image.Point{}, draw.Src)
|
||||
draw.Draw(img, dest, frame.Image, image.Point{}, draw.Over)
|
||||
} else {
|
||||
draw.Draw(img, dest, frame.Image, image.Point{}, draw.Src)
|
||||
}
|
||||
frame.Image = img
|
||||
frame.TopLeft = image.Point{}
|
||||
}
|
||||
|
||||
frame.ComposeOnto = frame.Number - 1
|
||||
if dispose_prev {
|
||||
// According to the spec dispose only affects the area of the
|
||||
// frame, filling it with the background color on disposal, so
|
||||
// add an extra frame that clears the prev frame's region and then
|
||||
// draw the current frame as gapless frame.
|
||||
prev_frame := self.Frames[len(self.Frames)-1]
|
||||
b := prev_frame.Image.Bounds()
|
||||
if bg_is_fully_transparent && (prev_frame.TopLeft == image.Point{}) && b.Dx() >= w && b.Dy() >= h {
|
||||
// prev frame covered entire canvas and background is clear so
|
||||
// just clear canvas
|
||||
frame.ComposeOnto = 0
|
||||
} else {
|
||||
img := image.NewNRGBA(b)
|
||||
draw.Draw(img, b, bg, image.Point{}, draw.Src)
|
||||
if b == frame.Image.Bounds() && prev_frame.TopLeft == frame.TopLeft {
|
||||
// prev frame and this frame overlap exactly, so just compose
|
||||
// directly without needing an extra frame
|
||||
draw.Draw(img, b, frame.Image, image.Point{}, draw.Over)
|
||||
frame.Replace = true
|
||||
frame.Image = img
|
||||
} else {
|
||||
// insert gapless frame to dispose previous frame
|
||||
nf := Frame{
|
||||
Number: frame.Number, Image: img, TopLeft: prev_frame.TopLeft, Replace: true,
|
||||
ComposeOnto: prev_frame.Number,
|
||||
}
|
||||
self.Frames = append(self.Frames, &nf)
|
||||
frame.Number++
|
||||
frame.ComposeOnto = nf.Number
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dispose_prev = f.Header.DisposalBitSet
|
||||
self.Frames = append(self.Frames, &frame)
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Image) populate_from_gif(g *gif.GIF) {
|
||||
min_gap := gifmeta.CalcMinimumGap(g.Delay)
|
||||
prev_disposal := uint8(gif.DisposalBackground)
|
||||
var prev_compose_onto uint
|
||||
for i, img := range g.Image {
|
||||
b := img.Bounds()
|
||||
frame := Frame{
|
||||
Number: uint(len(self.Frames) + 1), Image: NormalizeOrigin(img), TopLeft: b.Min,
|
||||
Delay: gifmeta.CalculateFrameDelay(g.Delay[i], min_gap),
|
||||
}
|
||||
switch prev_disposal {
|
||||
case gif.DisposalNone, 0:
|
||||
frame.ComposeOnto = frame.Number - 1
|
||||
case gif.DisposalPrevious:
|
||||
frame.ComposeOnto = prev_compose_onto
|
||||
case gif.DisposalBackground:
|
||||
// this is in contravention of the GIF spec but browsers and
|
||||
// gif2apng both do this, so follow them. Test image for this
|
||||
// is apple.gif
|
||||
frame.ComposeOnto = frame.Number - 1
|
||||
}
|
||||
prev_disposal, prev_compose_onto = g.Disposal[i], frame.ComposeOnto
|
||||
self.Frames = append(self.Frames, &frame)
|
||||
}
|
||||
switch {
|
||||
case g.LoopCount == 0:
|
||||
self.LoopCount = 0
|
||||
case g.LoopCount < 0:
|
||||
self.LoopCount = 1
|
||||
default:
|
||||
self.LoopCount = uint(g.LoopCount) + 1
|
||||
}
|
||||
}
|
||||
|
||||
// Create a clone of this image, all data is copied
|
||||
func (self *Image) Clone() *Image {
|
||||
ans := *self
|
||||
if ans.DefaultImage != nil {
|
||||
ans.DefaultImage = ClonePreservingType(ans.DefaultImage)
|
||||
}
|
||||
if ans.Metadata != nil {
|
||||
ans.Metadata = ans.Metadata.Clone()
|
||||
}
|
||||
ans.Frames = make([]*Frame, len(self.Frames))
|
||||
for i, f := range self.Frames {
|
||||
nf := *f
|
||||
nf.Image = ClonePreservingType(f.Image)
|
||||
ans.Frames[i] = &nf
|
||||
}
|
||||
return &ans
|
||||
}
|
||||
|
||||
// Coalesce all animation frames so that each frame is a snapshot of the
|
||||
// animation at that instant.
|
||||
func (self *Image) Coalesce() {
|
||||
if len(self.Frames) == 1 {
|
||||
return
|
||||
}
|
||||
canvas_rect := self.Frames[0].Bounds()
|
||||
var canvas *canvas_t
|
||||
for i, f := range self.Frames {
|
||||
if i == 0 || f.ComposeOnto == 0 {
|
||||
canvas = new_canvas(canvas_rect)
|
||||
} else {
|
||||
canvas = ClonePreservingType(self.Frames[f.ComposeOnto-1].Image).(*canvas_t)
|
||||
}
|
||||
op := draw.Over
|
||||
if f.Replace {
|
||||
op = draw.Src
|
||||
}
|
||||
draw.Draw(canvas, f.Bounds(), f.Image, image.Point{}, op)
|
||||
f.Image = canvas
|
||||
f.TopLeft = image.Point{}
|
||||
f.ComposeOnto = 0
|
||||
f.Replace = true
|
||||
}
|
||||
}
|
||||
|
||||
// converts a time.Duration to a numerator and denominator of type uint16.
|
||||
// It finds the best rational approximation of the duration in seconds.
|
||||
func as_fraction(d time.Duration) (num, den uint16) {
|
||||
if d <= 0 {
|
||||
return 0, 1
|
||||
}
|
||||
|
||||
// Convert duration to seconds as a float64
|
||||
val := d.Seconds()
|
||||
|
||||
// Use continued fractions to find the best rational approximation.
|
||||
// We look for the convergent that is closest to the original value
|
||||
// while keeping the numerator and denominator within uint16 bounds.
|
||||
|
||||
bestNum, bestDen := uint16(0), uint16(1)
|
||||
bestError := math.Abs(val)
|
||||
|
||||
var h, k [3]int64
|
||||
h[0], k[0] = 0, 1
|
||||
h[1], k[1] = 1, 0
|
||||
|
||||
f := val
|
||||
|
||||
for i := 2; i < 100; i++ { // Limit iterations to prevent infinite loops
|
||||
a := int64(f)
|
||||
|
||||
// Calculate next convergent
|
||||
h[2] = a*h[1] + h[0]
|
||||
k[2] = a*k[1] + k[0]
|
||||
|
||||
if h[2] > math.MaxUint16 || k[2] > math.MaxUint16 {
|
||||
// This convergent is out of bounds, so the previous one was the best we could do.
|
||||
break
|
||||
}
|
||||
|
||||
numConv := uint16(h[2])
|
||||
denConv := uint16(k[2])
|
||||
|
||||
currentVal := float64(numConv) / float64(denConv)
|
||||
currentError := math.Abs(val - currentVal)
|
||||
|
||||
if currentError < bestError {
|
||||
bestError = currentError
|
||||
bestNum = numConv
|
||||
bestDen = denConv
|
||||
}
|
||||
|
||||
// Check if we have a perfect approximation
|
||||
if f-float64(a) == 0.0 {
|
||||
break
|
||||
}
|
||||
|
||||
f = 1.0 / (f - float64(a))
|
||||
|
||||
h[0], h[1] = h[1], h[2]
|
||||
k[0], k[1] = k[1], k[2]
|
||||
}
|
||||
|
||||
return bestNum, bestDen
|
||||
}
|
||||
|
||||
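For instance, a 33ms frame delay gives val = 0.033; the loop above produces the convergents 0/1, 1/30, 3/91, 10/303 and 33/1000, all of which fit in uint16, so as_fraction returns the exact pair num = 33, den = 1000, which as_apng below stores in the frame's delay numerator and denominator fields.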
func (self *Image) as_apng() (ans apng.APNG) {
|
||||
ans.LoopCount = self.LoopCount
|
||||
if self.DefaultImage != nil {
|
||||
ans.Frames = append(ans.Frames, apng.Frame{Image: self.DefaultImage, IsDefault: true})
|
||||
}
|
||||
for i, f := range self.Frames {
|
||||
d := apng.Frame{
|
||||
DisposeOp: apng.DISPOSE_OP_BACKGROUND, BlendOp: apng.BLEND_OP_OVER, XOffset: f.TopLeft.X, YOffset: f.TopLeft.Y, Image: f.Image,
|
||||
}
|
||||
if !f.Replace {
|
||||
d.BlendOp = apng.BLEND_OP_SOURCE
|
||||
}
|
||||
d.DelayNumerator, d.DelayDenominator = as_fraction(f.Delay)
|
||||
if i+1 < len(self.Frames) {
|
||||
nf := self.Frames[i+1]
|
||||
switch nf.ComposeOnto {
|
||||
case f.Number:
|
||||
d.DisposeOp = apng.DISPOSE_OP_NONE
|
||||
case 0:
|
||||
d.DisposeOp = apng.DISPOSE_OP_BACKGROUND
|
||||
case f.ComposeOnto:
|
||||
d.DisposeOp = apng.DISPOSE_OP_PREVIOUS
|
||||
}
|
||||
}
|
||||
ans.Frames = append(ans.Frames, d)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Encode this image into a PNG
|
||||
func (self *Image) EncodeAsPNG(w io.Writer) error {
|
||||
if len(self.Frames) < 2 {
|
||||
img := self.DefaultImage
|
||||
if img == nil {
|
||||
img = self.Frames[0].Image
|
||||
}
|
||||
return png.Encode(w, img)
|
||||
}
|
||||
// Unfortunately apng.Encode() is buggy or I am getting my dispose op
|
||||
// mapping wrong, so coalesce first
|
||||
img := self.Clone()
|
||||
img.Coalesce()
|
||||
return apng.Encode(w, img.as_apng())
|
||||
}
|
||||
|
||||
// Save this image as PNG
|
||||
func (self *Image) SaveAsPNG(path string, mode fs.FileMode) error {
|
||||
f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
return self.EncodeAsPNG(f)
|
||||
}
|
||||
|
||||
// Flip all frames horizontally
|
||||
func (self *Image) FlipH() {
|
||||
for _, f := range self.Frames {
|
||||
f.Image = FlipH(f.Image)
|
||||
}
|
||||
}
|
||||
|
||||
// Flip all frames vertically
|
||||
func (self *Image) FlipV() {
|
||||
for _, f := range self.Frames {
|
||||
f.Image = FlipV(f.Image)
|
||||
}
|
||||
}
|
||||
|
||||
type rotation struct {
|
||||
angle_rads, cos, sin, center_x, center_y float64
|
||||
}
|
||||
|
||||
func new_rotation(angle_deg float64, canvas_rect image.Rectangle) *rotation {
|
||||
a := angle_deg * (math.Pi / 180.)
|
||||
return &rotation{a, math.Cos(a), math.Sin(a), float64(canvas_rect.Dx()) / 2, float64(canvas_rect.Dy()) / 2}
|
||||
}
|
||||
|
||||
func (r *rotation) apply(p image.Point) image.Point {
|
||||
if (p == image.Point{}) {
|
||||
return p
|
||||
}
|
||||
x := float64(p.X) - r.center_x
|
||||
y := float64(p.Y) - r.center_y
|
||||
rx := x*r.cos - y*r.sin
|
||||
ry := x*r.sin + y*r.cos
|
||||
return image.Pt(int(rx+r.center_x), int(ry+r.center_y))
|
||||
}
|
||||
|
||||
func (self *Image) Bounds() image.Rectangle {
|
||||
if self.DefaultImage != nil {
|
||||
return self.DefaultImage.Bounds()
|
||||
}
|
||||
if len(self.Frames) > 0 {
|
||||
return self.Frames[0].Bounds()
|
||||
}
|
||||
return image.Rect(0, 0, int(self.Metadata.PixelWidth), int(self.Metadata.PixelHeight))
|
||||
}
|
||||
|
||||
// Transpose all frames (flip horizontally and rotate 90 counter clockwise)
|
||||
func (self *Image) Transpose() {
|
||||
r := new_rotation(90, self.Bounds())
|
||||
for _, f := range self.Frames {
|
||||
f.Image = Transpose(f.Image)
|
||||
f.TopLeft = r.apply(f.TopLeft)
|
||||
}
|
||||
if self.DefaultImage != nil {
|
||||
self.DefaultImage = Transpose(self.DefaultImage)
|
||||
}
|
||||
self.Metadata.PixelWidth, self.Metadata.PixelHeight = self.Metadata.PixelHeight, self.Metadata.PixelWidth
|
||||
}
|
||||
|
||||
// Transverse all frames (flip vertically and rotate 90 counter clockwise)
|
||||
func (self *Image) Transverse() {
|
||||
r := new_rotation(90, self.Bounds())
|
||||
for _, f := range self.Frames {
|
||||
f.Image = Transverse(f.Image)
|
||||
f.TopLeft = r.apply(f.TopLeft)
|
||||
}
|
||||
if self.DefaultImage != nil {
|
||||
self.DefaultImage = Transverse(self.DefaultImage)
|
||||
}
|
||||
self.Metadata.PixelWidth, self.Metadata.PixelHeight = self.Metadata.PixelHeight, self.Metadata.PixelWidth
|
||||
}
|
||||
|
||||
// Rotate all frames by 90 counter clockwise
|
||||
func (self *Image) Rotate90() {
|
||||
r := new_rotation(90, self.Bounds())
|
||||
for _, f := range self.Frames {
|
||||
f.Image = Rotate90(f.Image)
|
||||
f.TopLeft = r.apply(f.TopLeft)
|
||||
}
|
||||
if self.DefaultImage != nil {
|
||||
self.DefaultImage = Rotate90(self.DefaultImage)
|
||||
}
|
||||
self.Metadata.PixelWidth, self.Metadata.PixelHeight = self.Metadata.PixelHeight, self.Metadata.PixelWidth
|
||||
}
|
||||
|
||||
// Rotate all frames by 180 degrees
|
||||
func (self *Image) Rotate180() {
|
||||
r := new_rotation(180, self.Bounds())
|
||||
for _, f := range self.Frames {
|
||||
f.Image = Rotate180(f.Image)
|
||||
f.TopLeft = r.apply(f.TopLeft)
|
||||
}
|
||||
if self.DefaultImage != nil {
|
||||
self.DefaultImage = Rotate180(self.DefaultImage)
|
||||
}
|
||||
}
|
||||
|
||||
// Rotate all frames by 270 counter clockwise
|
||||
func (self *Image) Rotate270() {
|
||||
r := new_rotation(270, self.Bounds())
|
||||
for _, f := range self.Frames {
|
||||
f.Image = Rotate270(f.Image)
|
||||
f.TopLeft = r.apply(f.TopLeft)
|
||||
}
|
||||
self.Metadata.PixelWidth, self.Metadata.PixelHeight = self.Metadata.PixelHeight, self.Metadata.PixelWidth
|
||||
if self.DefaultImage != nil {
|
||||
self.DefaultImage = Rotate270(self.DefaultImage)
|
||||
}
|
||||
}
|
||||
|
||||
// Resize all frames to the specified size
|
||||
func (self *Image) Resize(width, height int, filter ResampleFilter) {
|
||||
old_width, old_height := self.Bounds().Dx(), self.Bounds().Dy()
|
||||
sx := float64(width) / float64(old_width)
|
||||
sy := float64(height) / float64(old_height)
|
||||
scaledx := func(x int) int { return int(float64(x) * sx) }
|
||||
scaledy := func(y int) int { return int(float64(y) * sy) }
|
||||
for i, f := range self.Frames {
|
||||
if i == 0 {
|
||||
f.Image = ResizeWithOpacity(f.Image, width, height, filter, IsOpaque(f.Image))
|
||||
} else {
|
||||
f.Image = ResizeWithOpacity(f.Image, scaledx(f.Image.Bounds().Dx()), scaledy(f.Image.Bounds().Dy()), filter, IsOpaque(f.Image))
|
||||
f.TopLeft = image.Pt(scaledx(f.TopLeft.X), scaledy(f.TopLeft.Y))
|
||||
}
|
||||
}
|
||||
self.Metadata.PixelWidth, self.Metadata.PixelHeight = uint32(width), uint32(height)
|
||||
}
|
||||
|
||||
// Paste all frames onto the specified background color (OVER alpha blend)
|
||||
func (img *Image) PasteOntoBackground(bg color.Color) {
|
||||
if img.DefaultImage != nil {
|
||||
img.DefaultImage = PasteOntoBackground(img.DefaultImage, bg)
|
||||
}
|
||||
for _, f := range img.Frames {
|
||||
f.Image = PasteOntoBackground(f.Image, bg)
|
||||
}
|
||||
}
|
||||
30
vendor/github.com/kovidgoyal/imaging/apng/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
Original PNG code Copyright (c) 2009 The Go Authors.
|
||||
APNG enhancements Copyright (c) 2018 Ketchetwahmeegwun T. Southall /
|
||||
kts of kettek.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
126
vendor/github.com/kovidgoyal/imaging/apng/README.md
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
# APNG golang library
|
||||
This `apng` package provides methods for decoding and encoding APNG files. It is based upon the work in the official "image/png" package.
|
||||
|
||||
See [apngr](https://github.com/kettek/apngr) for an APNG extraction and combination tool using this library.
|
||||
|
||||
**NOTE**: The decoder should handle most anything you throw at it, and malformed PNGs should result in an error message. The encoder currently doesn't handle differences between Image formats and has not been tested as thoroughly.
|
||||
|
||||
If a regular PNG file is read, the first Frame of the APNG returned by `DecodeAll(*File)` will be the PNG data.
|
||||
|
||||
## Types
|
||||
### APNG
|
||||
The APNG type contains the frames of a decoded `.apng` file, along with any important properties. It may also be created and used for Encoding.
|
||||
|
||||
| Signature | Description |
|
||||
|----------------|----------------------------------------------------------------------------------------------------------|
|
||||
| Frames []Frame | The stored frames of the APNG. |
|
||||
| LoopCount uint | The number of times an animation should be restarted during display. A value of 0 means to loop forever. |
|
||||
|
||||
### Frame
|
||||
The Frame type contains an individual frame of an APNG. The following table provides the important properties and methods.
|
||||
|
||||
| Signature | Description |
|
||||
|----------------------|-------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Image image.Image | Frame image data. |
|
||||
| IsDefault bool | Indicates if this frame is a default image that should not be included as part of the animation frames. May only be true for the first Frame. |
|
||||
| XOffset int | Returns the x offset of the frame. |
|
||||
| YOffset int | Returns the y offset of the frame. |
|
||||
| DelayNumerator int | Returns the delay numerator. |
|
||||
| DelayDenominator int | Returns the delay denominator. |
|
||||
| DisposeOp byte | Returns the frame disposal operation. May be `apng.DISPOSE_OP_NONE`, `apng.DISPOSE_OP_BACKGROUND`, or `apng.DISPOSE_OP_PREVIOUS`. See the [APNG Specification](https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk) for more information. |
|
||||
| BlendOp byte | Returns the frame blending operation. May be `apng.BLEND_OP_SOURCE` or `apng.BLEND_OP_OVER`. See the [APNG Specification](https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk) for more information. |
|
||||
|
||||
## Methods
|
||||
### DecodeAll(io.Reader) (APNG, error)
|
||||
This method returns an APNG type containing the frames and associated data within the passed file.
|
||||
|
||||
### Example
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"github.com/kettek/apng"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Open our animated PNG file
|
||||
f, err := os.Open("animation.png")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer f.Close()
|
||||
// Decode all frames into an APNG
|
||||
a, err := apng.DecodeAll(f)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Print some information on the APNG
|
||||
log.Printf("Found %d frames\n", len(a.Frames))
|
||||
for i, frame := range a.Frames {
|
||||
b := frame.Image.Bounds()
|
||||
log.Printf("Frame %d: %dx%d\n", i, b.Max.X, b.Max.Y)
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Decode(io.Reader) (image.Image, error)
|
||||
This method returns the Image of the default frame of an APNG file.
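A minimal usage sketch (not part of the original README); it assumes the same `github.com/kettek/apng` import used by the examples above:
```go
package main

import (
	"image/png"
	"os"

	"github.com/kettek/apng"
)

func main() {
	// Open an animated PNG and decode only its default frame.
	in, err := os.Open("animation.png")
	if err != nil {
		panic(err)
	}
	defer in.Close()
	img, err := apng.Decode(in)
	if err != nil {
		panic(err)
	}
	// Re-encode just that frame as a plain, single-frame PNG.
	out, err := os.Create("default-frame.png")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if err := png.Encode(out, img); err != nil {
		panic(err)
	}
}
```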
|
||||
|
||||
### Encode(io.Writer, APNG) error
|
||||
This method writes the passed APNG object to the given io.Writer as an APNG binary file.
|
||||
|
||||
### Example
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"image/png"
|
||||
"os"
|
||||
"github.com/kettek/apng"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Define our variables
|
||||
output := "animation.png"
|
||||
images := [4]string{"0.png", "1.png", "2.png", "3.png"}
|
||||
a := apng.APNG{
|
||||
Frames: make([]apng.Frame, len(images)),
|
||||
}
|
||||
// Open our file for writing
|
||||
out, err := os.Create(output)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer out.Close()
|
||||
// Assign each decoded PNG's Image to the appropriate Frame Image
|
||||
for i, s := range images {
|
||||
in, err := os.Open(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer in.Close()
|
||||
m, err := png.Decode(in)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
a.Frames[i].Image = m
|
||||
}
|
||||
// Write APNG to our output file
|
||||
apng.Encode(out, a)
|
||||
}
|
||||
```
|
||||
|
||||
### Custom Compression
|
||||
A custom compression writer can be used instead of the default zlib writer. This can be done by creating a specific apng Encoder and assigning a construction function to the `CompressionWriter` field:
|
||||
|
||||
```go
|
||||
enc := apng.Encoder{
|
||||
CompressionWriter: func(w io.Writer) (apng.CompressionWriter, error) {
|
||||
return NewCustomZlibWriter(w)
|
||||
},
|
||||
}
|
||||
enc.Encode(out, a)
|
||||
```
|
||||
13
vendor/github.com/kovidgoyal/imaging/apng/apng.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2018 kts of kettek / Ketchetwahmeegwun Tecumseh Southall. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package apng
|
||||
|
||||
type APNG struct {
|
||||
Frames []Frame
|
||||
// LoopCount defines the number of times an animation will be
|
||||
// restarted during display.
|
||||
// A LoopCount of 0 means to loop forever
|
||||
LoopCount uint
|
||||
}
|
||||
47
vendor/github.com/kovidgoyal/imaging/apng/frame.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2018 kts of kettek / Ketchetwahmeegwun Tecumseh Southall. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package apng
|
||||
|
||||
import (
|
||||
"image"
|
||||
)
|
||||
|
||||
// dispose_op values, as per the APNG spec.
|
||||
const (
|
||||
DISPOSE_OP_NONE = 0
|
||||
DISPOSE_OP_BACKGROUND = 1
|
||||
DISPOSE_OP_PREVIOUS = 2
|
||||
)
|
||||
|
||||
// blend_op values, as per the APNG spec.
|
||||
const (
|
||||
BLEND_OP_SOURCE = 0
|
||||
BLEND_OP_OVER = 1
|
||||
)
|
||||
|
||||
type Frame struct {
|
||||
Image image.Image
|
||||
width, height int
|
||||
XOffset, YOffset int
|
||||
DelayNumerator uint16
|
||||
DelayDenominator uint16
|
||||
DisposeOp byte
|
||||
BlendOp byte
|
||||
// IsDefault indicates if the Frame is a default image that
|
||||
// should not be used in the animation. IsDefault can only
|
||||
// be true on the first frame.
|
||||
IsDefault bool
|
||||
}
|
||||
|
||||
// GetDelay returns the frame delay in seconds.
|
||||
func (f *Frame) GetDelay() float64 {
|
||||
d := uint16(0)
|
||||
if f.DelayDenominator == 0 {
|
||||
d = 100
|
||||
} else {
|
||||
d = f.DelayDenominator
|
||||
}
|
||||
return float64(f.DelayNumerator) / float64(d)
|
||||
}
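// For example (illustrative, not part of the original source): a frame with
// DelayNumerator=1 and DelayDenominator=10 delays for 0.1 seconds, while a zero
// DelayDenominator is treated as 100, matching the APNG fcTL specification.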
|
||||
71
vendor/github.com/kovidgoyal/imaging/apng/paeth.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package apng
|
||||
|
||||
// intSize is either 32 or 64.
|
||||
const intSize = 32 << (^uint(0) >> 63)
|
||||
|
||||
func abs(x int) int {
|
||||
// m := -1 if x < 0. m := 0 otherwise.
|
||||
m := x >> (intSize - 1)
|
||||
|
||||
// In two's complement representation, the negative number
|
||||
// of any number (except the smallest one) can be computed
|
||||
// by flipping all the bits and add 1. This is faster than
|
||||
// code with a branch.
|
||||
// See Hacker's Delight, section 2-4.
|
||||
return (x ^ m) - m
|
||||
}
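// For example (illustrative): with x = -5, m = -1 and (x ^ m) - m = 4 + 1 = 5;
// with x >= 0, m = 0 and the expression is simply x.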
|
||||
|
||||
// paeth implements the Paeth filter function, as per the PNG specification.
|
||||
func paeth(a, b, c uint8) uint8 {
|
||||
// This is an optimized version of the sample code in the PNG spec.
|
||||
// For example, the sample code starts with:
|
||||
// p := int(a) + int(b) - int(c)
|
||||
// pa := abs(p - int(a))
|
||||
// but the optimized form uses fewer arithmetic operations:
|
||||
// pa := int(b) - int(c)
|
||||
// pa = abs(pa)
|
||||
pc := int(c)
|
||||
pa := int(b) - pc
|
||||
pb := int(a) - pc
|
||||
pc = abs(pa + pb)
|
||||
pa = abs(pa)
|
||||
pb = abs(pb)
|
||||
if pa <= pb && pa <= pc {
|
||||
return a
|
||||
} else if pb <= pc {
|
||||
return b
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// filterPaeth applies the Paeth filter to the cdat slice.
|
||||
// cdat is the current row's data, pdat is the previous row's data.
|
||||
func filterPaeth(cdat, pdat []byte, bytesPerPixel int) {
|
||||
var a, b, c, pa, pb, pc int
|
||||
for i := range bytesPerPixel {
|
||||
a, c = 0, 0
|
||||
for j := i; j < len(cdat); j += bytesPerPixel {
|
||||
b = int(pdat[j])
|
||||
pa = b - c
|
||||
pb = a - c
|
||||
pc = abs(pa + pb)
|
||||
pa = abs(pa)
|
||||
pb = abs(pb)
|
||||
if pa <= pb && pa <= pc {
|
||||
// No-op.
|
||||
} else if pb <= pc {
|
||||
a = b
|
||||
} else {
|
||||
a = c
|
||||
}
|
||||
a += int(cdat[j])
|
||||
a &= 0xff
|
||||
cdat[j] = uint8(a)
|
||||
c = b
|
||||
}
|
||||
}
|
||||
}
|
||||
1151
vendor/github.com/kovidgoyal/imaging/apng/reader.go
generated
vendored
Normal file
File diff suppressed because it is too large
733
vendor/github.com/kovidgoyal/imaging/apng/writer.go
generated
vendored
Normal file
@@ -0,0 +1,733 @@
|
||||
// Original PNG code Copyright 2009 The Go Authors.
|
||||
// Additional APNG enhancements Copyright 2018 Ketchetwahmeegwun
|
||||
// Tecumseh Southall / kts of kettek.
|
||||
// All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package apng
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/zlib"
|
||||
"encoding/binary"
|
||||
"hash/crc32"
|
||||
"image"
|
||||
"image/color"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Encoder configures encoding PNG images.
|
||||
type Encoder struct {
|
||||
CompressionLevel CompressionLevel
|
||||
|
||||
// BufferPool optionally specifies a buffer pool to get temporary
|
||||
// EncoderBuffers when encoding an image.
|
||||
BufferPool EncoderBufferPool
|
||||
|
||||
// CompressionWriter optionally provides an external zlib compression
|
||||
// writer for writing PNG image data.
|
||||
CompressionWriter func(w io.Writer) (CompressionWriter, error)
|
||||
}
|
||||
|
||||
// CompressionWriter zlib compression writer interface.
|
||||
type CompressionWriter interface {
|
||||
Write(p []byte) (n int, err error)
|
||||
Reset(w io.Writer)
|
||||
Close() error
|
||||
}
|
||||
|
||||
// EncoderBufferPool is an interface for getting and returning temporary
|
||||
// instances of the EncoderBuffer struct. This can be used to reuse buffers
|
||||
// when encoding multiple images.
|
||||
type EncoderBufferPool interface {
|
||||
Get() *EncoderBuffer
|
||||
Put(*EncoderBuffer)
|
||||
}
|
||||
|
||||
// EncoderBuffer holds the buffers used for encoding PNG images.
|
||||
type EncoderBuffer encoder
|
||||
|
||||
type encoder struct {
|
||||
enc *Encoder
|
||||
w io.Writer
|
||||
a APNG
|
||||
writeType int // 0 = IDAT, 1 = fdAT
|
||||
seq int
|
||||
cb int
|
||||
err error
|
||||
header [8]byte
|
||||
footer [4]byte
|
||||
tmp [4 * 256]byte
|
||||
cr [nFilter][]uint8
|
||||
pr []uint8
|
||||
zw CompressionWriter
|
||||
zwLevel CompressionLevel
|
||||
bw *bufio.Writer
|
||||
}
|
||||
|
||||
// CompressionLevel indicates the compression level.
|
||||
type CompressionLevel int
|
||||
|
||||
const (
|
||||
DefaultCompression CompressionLevel = 0
|
||||
NoCompression CompressionLevel = -1
|
||||
BestSpeed CompressionLevel = -2
|
||||
BestCompression CompressionLevel = -3
|
||||
|
||||
// Positive CompressionLevel values are reserved to mean a numeric zlib
|
||||
// compression level, although that is not implemented yet.
|
||||
)
|
||||
|
||||
type opaquer interface {
|
||||
Opaque() bool
|
||||
}
|
||||
|
||||
// Returns whether or not the image is fully opaque.
|
||||
func opaque(m image.Image) bool {
|
||||
if o, ok := m.(opaquer); ok {
|
||||
return o.Opaque()
|
||||
}
|
||||
b := m.Bounds()
|
||||
for y := b.Min.Y; y < b.Max.Y; y++ {
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
_, _, _, a := m.At(x, y).RGBA()
|
||||
if a != 0xffff {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// The absolute value of a byte interpreted as a signed int8.
|
||||
func abs8(d uint8) int {
|
||||
if d < 128 {
|
||||
return int(d)
|
||||
}
|
||||
return 256 - int(d)
|
||||
}
|
||||
|
||||
func (e *encoder) writeChunk(b []byte, name string) {
|
||||
if e.err != nil {
|
||||
return
|
||||
}
|
||||
n := uint32(len(b))
|
||||
if int(n) != len(b) {
|
||||
e.err = UnsupportedError(name + " chunk is too large: " + strconv.Itoa(len(b)))
|
||||
return
|
||||
}
|
||||
binary.BigEndian.PutUint32(e.header[:4], n)
|
||||
e.header[4] = name[0]
|
||||
e.header[5] = name[1]
|
||||
e.header[6] = name[2]
|
||||
e.header[7] = name[3]
|
||||
crc := crc32.NewIEEE()
|
||||
crc.Write(e.header[4:8])
|
||||
crc.Write(b)
|
||||
binary.BigEndian.PutUint32(e.footer[:4], crc.Sum32())
|
||||
|
||||
_, e.err = e.w.Write(e.header[:8])
|
||||
if e.err != nil {
|
||||
return
|
||||
}
|
||||
_, e.err = e.w.Write(b)
|
||||
if e.err != nil {
|
||||
return
|
||||
}
|
||||
_, e.err = e.w.Write(e.footer[:4])
|
||||
}
|
||||
|
||||
func (e *encoder) writeIHDR() {
|
||||
b := e.a.Frames[0].Image.Bounds()
|
||||
binary.BigEndian.PutUint32(e.tmp[0:4], uint32(b.Dx()))
|
||||
binary.BigEndian.PutUint32(e.tmp[4:8], uint32(b.Dy()))
|
||||
// Set bit depth and color type.
|
||||
switch e.cb {
|
||||
case cbG8:
|
||||
e.tmp[8] = 8
|
||||
e.tmp[9] = ctGrayscale
|
||||
case cbTC8:
|
||||
e.tmp[8] = 8
|
||||
e.tmp[9] = ctTrueColor
|
||||
case cbP8:
|
||||
e.tmp[8] = 8
|
||||
e.tmp[9] = ctPaletted
|
||||
case cbP4:
|
||||
e.tmp[8] = 4
|
||||
e.tmp[9] = ctPaletted
|
||||
case cbP2:
|
||||
e.tmp[8] = 2
|
||||
e.tmp[9] = ctPaletted
|
||||
case cbP1:
|
||||
e.tmp[8] = 1
|
||||
e.tmp[9] = ctPaletted
|
||||
case cbTCA8:
|
||||
e.tmp[8] = 8
|
||||
e.tmp[9] = ctTrueColorAlpha
|
||||
case cbG16:
|
||||
e.tmp[8] = 16
|
||||
e.tmp[9] = ctGrayscale
|
||||
case cbTC16:
|
||||
e.tmp[8] = 16
|
||||
e.tmp[9] = ctTrueColor
|
||||
case cbTCA16:
|
||||
e.tmp[8] = 16
|
||||
e.tmp[9] = ctTrueColorAlpha
|
||||
}
|
||||
e.tmp[10] = 0 // default compression method
|
||||
e.tmp[11] = 0 // default filter method
|
||||
e.tmp[12] = 0 // non-interlaced
|
||||
e.writeChunk(e.tmp[:13], "IHDR")
|
||||
}
|
||||
|
||||
func (e *encoder) writeacTL() {
|
||||
binary.BigEndian.PutUint32(e.tmp[0:4], uint32(len(e.a.Frames)))
|
||||
binary.BigEndian.PutUint32(e.tmp[4:8], uint32(e.a.LoopCount))
|
||||
e.writeChunk(e.tmp[:8], "acTL")
|
||||
}
|
||||
|
||||
func (e *encoder) writefcTL(f Frame) {
|
||||
binary.BigEndian.PutUint32(e.tmp[0:4], uint32(e.seq))
|
||||
e.seq = e.seq + 1
|
||||
b := f.Image.Bounds()
|
||||
binary.BigEndian.PutUint32(e.tmp[4:8], uint32(b.Dx()))
|
||||
binary.BigEndian.PutUint32(e.tmp[8:12], uint32(b.Dy()))
|
||||
binary.BigEndian.PutUint32(e.tmp[12:16], uint32(f.XOffset))
|
||||
binary.BigEndian.PutUint32(e.tmp[16:20], uint32(f.YOffset))
|
||||
binary.BigEndian.PutUint16(e.tmp[20:22], f.DelayNumerator)
|
||||
binary.BigEndian.PutUint16(e.tmp[22:24], f.DelayDenominator)
|
||||
e.tmp[24] = f.DisposeOp
|
||||
e.tmp[25] = f.BlendOp
|
||||
e.writeChunk(e.tmp[:26], "fcTL")
|
||||
}
|
||||
|
||||
func (e *encoder) writefdATs(f Frame) {
|
||||
e.writeType = 1
|
||||
if e.err != nil {
|
||||
return
|
||||
}
|
||||
if e.bw == nil {
|
||||
e.bw = bufio.NewWriterSize(e, 1<<15)
|
||||
} else {
|
||||
e.bw.Reset(e)
|
||||
}
|
||||
e.err = e.writeImage(e.bw, f.Image, e.cb, e.enc.CompressionLevel)
|
||||
if e.err != nil {
|
||||
return
|
||||
}
|
||||
e.err = e.bw.Flush()
|
||||
}
|
||||
|
||||
func (e *encoder) writePLTEAndTRNS(p color.Palette) {
|
||||
if len(p) < 1 || len(p) > 256 {
|
||||
e.err = FormatError("bad palette length: " + strconv.Itoa(len(p)))
|
||||
return
|
||||
}
|
||||
last := -1
|
||||
for i, c := range p {
|
||||
c1 := color.NRGBAModel.Convert(c).(color.NRGBA)
|
||||
e.tmp[3*i+0] = c1.R
|
||||
e.tmp[3*i+1] = c1.G
|
||||
e.tmp[3*i+2] = c1.B
|
||||
if c1.A != 0xff {
|
||||
last = i
|
||||
}
|
||||
e.tmp[3*256+i] = c1.A
|
||||
}
|
||||
e.writeChunk(e.tmp[:3*len(p)], "PLTE")
|
||||
if last != -1 {
|
||||
e.writeChunk(e.tmp[3*256:3*256+1+last], "tRNS")
|
||||
}
|
||||
}
|
||||
|
||||
// An encoder is an io.Writer that satisfies writes by writing PNG IDAT chunks,
|
||||
// including an 8-byte header and 4-byte CRC checksum per Write call. Such calls
|
||||
// should be relatively infrequent, since writeIDATs uses a bufio.Writer.
|
||||
//
|
||||
// This method should only be called from writeIDATs (via writeImage).
|
||||
// No other code should treat an encoder as an io.Writer.
|
||||
func (e *encoder) Write(b []byte) (int, error) {
|
||||
if e.writeType == 0 {
|
||||
e.writeChunk(b, "IDAT")
|
||||
} else {
|
||||
c := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(c[0:4], uint32(e.seq))
|
||||
e.seq = e.seq + 1
|
||||
b = append(c, b...)
|
||||
e.writeChunk(b, "fdAT")
|
||||
}
|
||||
if e.err != nil {
|
||||
return 0, e.err
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Chooses the filter to use for encoding the current row, and applies it.
|
||||
// The return value is the index of the filter and also of the row in cr that has had it applied.
|
||||
func filter(cr *[nFilter][]byte, pr []byte, bpp int) int {
|
||||
// We try all five filter types, and pick the one that minimizes the sum of absolute differences.
|
||||
// This is the same heuristic that libpng uses, although the filters are attempted in order of
|
||||
// estimated most likely to be minimal (ftUp, ftPaeth, ftNone, ftSub, ftAverage), rather than
|
||||
// in their enumeration order (ftNone, ftSub, ftUp, ftAverage, ftPaeth).
|
||||
cdat0 := cr[0][1:]
|
||||
cdat1 := cr[1][1:]
|
||||
cdat2 := cr[2][1:]
|
||||
cdat3 := cr[3][1:]
|
||||
cdat4 := cr[4][1:]
|
||||
pdat := pr[1:]
|
||||
n := len(cdat0)
|
||||
|
||||
// The up filter.
|
||||
sum := 0
|
||||
for i := range n {
|
||||
cdat2[i] = cdat0[i] - pdat[i]
|
||||
sum += abs8(cdat2[i])
|
||||
}
|
||||
best := sum
|
||||
filter := ftUp
|
||||
|
||||
// The Paeth filter.
|
||||
sum = 0
|
||||
for i := range bpp {
|
||||
cdat4[i] = cdat0[i] - pdat[i]
|
||||
sum += abs8(cdat4[i])
|
||||
}
|
||||
for i := bpp; i < n; i++ {
|
||||
cdat4[i] = cdat0[i] - paeth(cdat0[i-bpp], pdat[i], pdat[i-bpp])
|
||||
sum += abs8(cdat4[i])
|
||||
if sum >= best {
|
||||
break
|
||||
}
|
||||
}
|
||||
if sum < best {
|
||||
best = sum
|
||||
filter = ftPaeth
|
||||
}
|
||||
|
||||
// The none filter.
|
||||
sum = 0
|
||||
for i := range n {
|
||||
sum += abs8(cdat0[i])
|
||||
if sum >= best {
|
||||
break
|
||||
}
|
||||
}
|
||||
if sum < best {
|
||||
best = sum
|
||||
filter = ftNone
|
||||
}
|
||||
|
||||
// The sub filter.
|
||||
sum = 0
|
||||
for i := range bpp {
|
||||
cdat1[i] = cdat0[i]
|
||||
sum += abs8(cdat1[i])
|
||||
}
|
||||
for i := bpp; i < n; i++ {
|
||||
cdat1[i] = cdat0[i] - cdat0[i-bpp]
|
||||
sum += abs8(cdat1[i])
|
||||
if sum >= best {
|
||||
break
|
||||
}
|
||||
}
|
||||
if sum < best {
|
||||
best = sum
|
||||
filter = ftSub
|
||||
}
|
||||
|
||||
// The average filter.
|
||||
sum = 0
|
||||
for i := range bpp {
|
||||
cdat3[i] = cdat0[i] - pdat[i]/2
|
||||
sum += abs8(cdat3[i])
|
||||
}
|
||||
for i := bpp; i < n; i++ {
|
||||
cdat3[i] = cdat0[i] - uint8((int(cdat0[i-bpp])+int(pdat[i]))/2)
|
||||
sum += abs8(cdat3[i])
|
||||
if sum >= best {
|
||||
break
|
||||
}
|
||||
}
|
||||
if sum < best {
|
||||
filter = ftAverage
|
||||
}
|
||||
|
||||
return filter
|
||||
}
|
||||
|
||||
func zeroMemory(v []uint8) {
|
||||
for i := range v {
|
||||
v[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) writeImage(w io.Writer, m image.Image, cb int, level CompressionLevel) error {
|
||||
if e.zw == nil || e.zwLevel != level {
|
||||
if e.enc.CompressionWriter != nil {
|
||||
zw, err := e.enc.CompressionWriter(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.zw = zw
|
||||
} else {
|
||||
zw, err := zlib.NewWriterLevel(w, levelToZlib(level))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.zw = zw
|
||||
}
|
||||
e.zwLevel = level
|
||||
} else {
|
||||
e.zw.Reset(w)
|
||||
}
|
||||
defer e.zw.Close()
|
||||
|
||||
bitsPerPixel := 0
|
||||
|
||||
switch cb {
|
||||
case cbG8:
|
||||
bitsPerPixel = 8
|
||||
case cbTC8:
|
||||
bitsPerPixel = 24
|
||||
case cbP8:
|
||||
bitsPerPixel = 8
|
||||
case cbP4:
|
||||
bitsPerPixel = 4
|
||||
case cbP2:
|
||||
bitsPerPixel = 2
|
||||
case cbP1:
|
||||
bitsPerPixel = 1
|
||||
case cbTCA8:
|
||||
bitsPerPixel = 32
|
||||
case cbTC16:
|
||||
bitsPerPixel = 48
|
||||
case cbTCA16:
|
||||
bitsPerPixel = 64
|
||||
case cbG16:
|
||||
bitsPerPixel = 16
|
||||
}
|
||||
|
||||
// cr[*] and pr are the bytes for the current and previous row.
|
||||
// cr[0] is unfiltered (or equivalently, filtered with the ftNone filter).
|
||||
// cr[ft], for non-zero filter types ft, are buffers for transforming cr[0] under the
|
||||
// other PNG filter types. These buffers are allocated once and re-used for each row.
|
||||
// The +1 is for the per-row filter type, which is at cr[*][0].
|
||||
b := m.Bounds()
|
||||
sz := 1 + (bitsPerPixel*b.Dx()+7)/8
|
||||
for i := range e.cr {
|
||||
if cap(e.cr[i]) < sz {
|
||||
e.cr[i] = make([]uint8, sz)
|
||||
} else {
|
||||
e.cr[i] = e.cr[i][:sz]
|
||||
}
|
||||
e.cr[i][0] = uint8(i)
|
||||
}
|
||||
cr := e.cr
|
||||
if cap(e.pr) < sz {
|
||||
e.pr = make([]uint8, sz)
|
||||
} else {
|
||||
e.pr = e.pr[:sz]
|
||||
zeroMemory(e.pr)
|
||||
}
|
||||
pr := e.pr
|
||||
|
||||
gray, _ := m.(*image.Gray)
|
||||
rgba, _ := m.(*image.RGBA)
|
||||
paletted, _ := m.(*image.Paletted)
|
||||
nrgba, _ := m.(*image.NRGBA)
|
||||
|
||||
for y := b.Min.Y; y < b.Max.Y; y++ {
|
||||
// Convert from colors to bytes.
|
||||
i := 1
|
||||
switch cb {
|
||||
case cbG8:
|
||||
if gray != nil {
|
||||
offset := (y - b.Min.Y) * gray.Stride
|
||||
copy(cr[0][1:], gray.Pix[offset:offset+b.Dx()])
|
||||
} else {
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
c := color.GrayModel.Convert(m.At(x, y)).(color.Gray)
|
||||
cr[0][i] = c.Y
|
||||
i++
|
||||
}
|
||||
}
|
||||
case cbTC8:
|
||||
// We have previously verified that the alpha value is fully opaque.
|
||||
cr0 := cr[0]
|
||||
stride, pix := 0, []byte(nil)
|
||||
if rgba != nil {
|
||||
stride, pix = rgba.Stride, rgba.Pix
|
||||
} else if nrgba != nil {
|
||||
stride, pix = nrgba.Stride, nrgba.Pix
|
||||
}
|
||||
if stride != 0 {
|
||||
j0 := (y - b.Min.Y) * stride
|
||||
j1 := j0 + b.Dx()*4
|
||||
for j := j0; j < j1; j += 4 {
|
||||
cr0[i+0] = pix[j+0]
|
||||
cr0[i+1] = pix[j+1]
|
||||
cr0[i+2] = pix[j+2]
|
||||
i += 3
|
||||
}
|
||||
} else {
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
r, g, b, _ := m.At(x, y).RGBA()
|
||||
cr0[i+0] = uint8(r >> 8)
|
||||
cr0[i+1] = uint8(g >> 8)
|
||||
cr0[i+2] = uint8(b >> 8)
|
||||
i += 3
|
||||
}
|
||||
}
|
||||
case cbP8:
|
||||
if paletted != nil {
|
||||
offset := (y - b.Min.Y) * paletted.Stride
|
||||
copy(cr[0][1:], paletted.Pix[offset:offset+b.Dx()])
|
||||
} else {
|
||||
pi := m.(image.PalettedImage)
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
cr[0][i] = pi.ColorIndexAt(x, y)
|
||||
i += 1
|
||||
}
|
||||
}
|
||||
|
||||
case cbP4, cbP2, cbP1:
|
||||
pi := m.(image.PalettedImage)
|
||||
|
||||
var a uint8
|
||||
var c int
|
||||
pixelsPerByte := 8 / bitsPerPixel
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
a = a<<uint(bitsPerPixel) | pi.ColorIndexAt(x, y)
|
||||
c++
|
||||
if c == pixelsPerByte {
|
||||
cr[0][i] = a
|
||||
i += 1
|
||||
a = 0
|
||||
c = 0
|
||||
}
|
||||
}
|
||||
if c != 0 {
|
||||
for c != pixelsPerByte {
|
||||
a = a << uint(bitsPerPixel)
|
||||
c++
|
||||
}
|
||||
cr[0][i] = a
|
||||
}
|
||||
|
||||
case cbTCA8:
|
||||
if nrgba != nil {
|
||||
offset := (y - b.Min.Y) * nrgba.Stride
|
||||
copy(cr[0][1:], nrgba.Pix[offset:offset+b.Dx()*4])
|
||||
} else {
|
||||
// Convert from image.Image (which is alpha-premultiplied) to PNG's non-alpha-premultiplied.
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
c := color.NRGBAModel.Convert(m.At(x, y)).(color.NRGBA)
|
||||
cr[0][i+0] = c.R
|
||||
cr[0][i+1] = c.G
|
||||
cr[0][i+2] = c.B
|
||||
cr[0][i+3] = c.A
|
||||
i += 4
|
||||
}
|
||||
}
|
||||
case cbG16:
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
c := color.Gray16Model.Convert(m.At(x, y)).(color.Gray16)
|
||||
cr[0][i+0] = uint8(c.Y >> 8)
|
||||
cr[0][i+1] = uint8(c.Y)
|
||||
i += 2
|
||||
}
|
||||
case cbTC16:
|
||||
// We have previously verified that the alpha value is fully opaque.
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
r, g, b, _ := m.At(x, y).RGBA()
|
||||
cr[0][i+0] = uint8(r >> 8)
|
||||
cr[0][i+1] = uint8(r)
|
||||
cr[0][i+2] = uint8(g >> 8)
|
||||
cr[0][i+3] = uint8(g)
|
||||
cr[0][i+4] = uint8(b >> 8)
|
||||
cr[0][i+5] = uint8(b)
|
||||
i += 6
|
||||
}
|
||||
case cbTCA16:
|
||||
// Convert from image.Image (which is alpha-premultiplied) to PNG's non-alpha-premultiplied.
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
c := color.NRGBA64Model.Convert(m.At(x, y)).(color.NRGBA64)
|
||||
cr[0][i+0] = uint8(c.R >> 8)
|
||||
cr[0][i+1] = uint8(c.R)
|
||||
cr[0][i+2] = uint8(c.G >> 8)
|
||||
cr[0][i+3] = uint8(c.G)
|
||||
cr[0][i+4] = uint8(c.B >> 8)
|
||||
cr[0][i+5] = uint8(c.B)
|
||||
cr[0][i+6] = uint8(c.A >> 8)
|
||||
cr[0][i+7] = uint8(c.A)
|
||||
i += 8
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the filter.
|
||||
// Skip filter for NoCompression and paletted images (cbP8) as
|
||||
// "filters are rarely useful on palette images" and will result
|
||||
// in larger files (see http://www.libpng.org/pub/png/book/chapter09.html).
|
||||
f := ftNone
|
||||
if level != zlib.NoCompression && cb != cbP8 && cb != cbP4 && cb != cbP2 && cb != cbP1 {
|
||||
// Since we skip paletted images we don't have to worry about
|
||||
// bitsPerPixel not being a multiple of 8
|
||||
bpp := bitsPerPixel / 8
|
||||
f = filter(&cr, pr, bpp)
|
||||
}
|
||||
|
||||
// Write the compressed bytes.
|
||||
if _, err := e.zw.Write(cr[f]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// The current row for y is the previous row for y+1.
|
||||
pr, cr[0] = cr[0], pr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write the actual image data to one or more IDAT chunks.
|
||||
func (e *encoder) writeIDATs() {
|
||||
e.writeType = 0
|
||||
if e.err != nil {
|
||||
return
|
||||
}
|
||||
if e.bw == nil {
|
||||
e.bw = bufio.NewWriterSize(e, 1<<15)
|
||||
} else {
|
||||
e.bw.Reset(e)
|
||||
}
|
||||
e.err = e.writeImage(e.bw, e.a.Frames[0].Image, e.cb, e.enc.CompressionLevel)
|
||||
if e.err != nil {
|
||||
return
|
||||
}
|
||||
e.err = e.bw.Flush()
|
||||
}
|
||||
|
||||
// This function is required because we want the zero value of
|
||||
// Encoder.CompressionLevel to map to zlib.DefaultCompression.
|
||||
func levelToZlib(l CompressionLevel) int {
|
||||
switch l {
|
||||
case DefaultCompression:
|
||||
return zlib.DefaultCompression
|
||||
case NoCompression:
|
||||
return zlib.NoCompression
|
||||
case BestSpeed:
|
||||
return zlib.BestSpeed
|
||||
case BestCompression:
|
||||
return zlib.BestCompression
|
||||
default:
|
||||
return zlib.DefaultCompression
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) writeIEND() { e.writeChunk(nil, "IEND") }
|
||||
|
||||
// Encode writes the APNG a to w in PNG format. Any Image may be
|
||||
// encoded, but images that are not image.NRGBA might be encoded lossily.
|
||||
func Encode(w io.Writer, a APNG) error {
|
||||
var e Encoder
|
||||
return e.Encode(w, a)
|
||||
}
|
||||
|
||||
// Encode writes the APNG a to w in PNG format.
|
||||
func (enc *Encoder) Encode(w io.Writer, a APNG) error {
|
||||
// Obviously, negative widths and heights are invalid. Furthermore, the PNG
|
||||
// spec section 11.2.2 says that zero is invalid. Excessively large images are
|
||||
// also rejected.
|
||||
mw, mh := int64(a.Frames[0].Image.Bounds().Dx()), int64(a.Frames[0].Image.Bounds().Dy())
|
||||
if mw <= 0 || mh <= 0 || mw >= 1<<32 || mh >= 1<<32 {
|
||||
return FormatError("invalid image size: " + strconv.FormatInt(mw, 10) + "x" + strconv.FormatInt(mh, 10))
|
||||
}
|
||||
|
||||
var e *encoder
|
||||
if enc.BufferPool != nil {
|
||||
buffer := enc.BufferPool.Get()
|
||||
e = (*encoder)(buffer)
|
||||
e.seq = 0
|
||||
}
|
||||
if e == nil {
|
||||
e = &encoder{}
|
||||
}
|
||||
if enc.BufferPool != nil {
|
||||
defer enc.BufferPool.Put((*EncoderBuffer)(e))
|
||||
}
|
||||
|
||||
e.enc = enc
|
||||
e.w = w
|
||||
e.a = a
|
||||
|
||||
var pal color.Palette
|
||||
// cbP8 encoding needs PalettedImage's ColorIndexAt method.
|
||||
if _, ok := a.Frames[0].Image.(image.PalettedImage); ok {
|
||||
pal, _ = a.Frames[0].Image.ColorModel().(color.Palette)
|
||||
}
|
||||
if pal != nil {
|
||||
if len(pal) <= 2 {
|
||||
e.cb = cbP1
|
||||
} else if len(pal) <= 4 {
|
||||
e.cb = cbP2
|
||||
} else if len(pal) <= 16 {
|
||||
e.cb = cbP4
|
||||
} else {
|
||||
e.cb = cbP8
|
||||
}
|
||||
} else {
|
||||
switch a.Frames[0].Image.ColorModel() {
|
||||
case color.GrayModel:
|
||||
e.cb = cbG8
|
||||
case color.Gray16Model:
|
||||
e.cb = cbG16
|
||||
case color.RGBAModel, color.NRGBAModel, color.AlphaModel:
|
||||
isOpaque := true
|
||||
for _, v := range a.Frames {
|
||||
if !opaque(v.Image) {
|
||||
isOpaque = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if isOpaque {
|
||||
e.cb = cbTC8
|
||||
} else {
|
||||
e.cb = cbTCA8
|
||||
}
|
||||
default:
|
||||
isOpaque := true
|
||||
for _, v := range a.Frames {
|
||||
if !opaque(v.Image) {
|
||||
isOpaque = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if isOpaque {
|
||||
e.cb = cbTC16
|
||||
} else {
|
||||
e.cb = cbTCA16
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, e.err = io.WriteString(w, pngHeader)
|
||||
e.writeIHDR()
|
||||
if pal != nil {
|
||||
e.writePLTEAndTRNS(pal)
|
||||
}
|
||||
if len(e.a.Frames) > 1 {
|
||||
e.writeacTL()
|
||||
}
|
||||
if !e.a.Frames[0].IsDefault {
|
||||
e.writefcTL(e.a.Frames[0])
|
||||
}
|
||||
e.writeIDATs()
|
||||
for i := 0; i < len(e.a.Frames); i = i + 1 {
|
||||
if i != 0 && !e.a.Frames[i].IsDefault {
|
||||
e.writefcTL(e.a.Frames[i])
|
||||
e.writefdATs(e.a.Frames[i])
|
||||
}
|
||||
}
|
||||
e.writeIEND()
|
||||
return e.err
|
||||
}
|
||||
250
vendor/github.com/kovidgoyal/imaging/color_space_conversion.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
package imaging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"image"
|
||||
"image/color"
|
||||
"image/draw"
|
||||
"math"
|
||||
|
||||
"github.com/kovidgoyal/go-parallel"
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/icc"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
// unpremultiply and convert to normalized float
|
||||
func unpremultiply8(r_, a_ uint8) float64 {
|
||||
r, a := uint16(r_), uint16(a_)
|
||||
return float64((r*math.MaxUint8)/a) / math.MaxUint8
|
||||
}
|
||||
|
||||
// unpremultiply and convert to normalized float
|
||||
func unpremultiply(r, a uint32) float64 {
|
||||
return float64((r*math.MaxUint16)/a) / math.MaxUint16
|
||||
}
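// For example (illustrative, not part of the original source): with
// premultiplied alpha, a pixel whose true red channel is 0.5 is stored with
// r = a/2, so unpremultiply(a/2, a) recovers approximately 0.5.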
|
||||
|
||||
func f8(x uint8) float64 { return float64(x) / math.MaxUint8 }
|
||||
func f8i(x float64) uint8 { return uint8(x * math.MaxUint8) }
|
||||
func f16(x uint16) float64 { return float64(x) / math.MaxUint16 }
|
||||
func f16i(x float64) uint16 { return uint16(x * math.MaxUint16) }
|
||||
|
||||
func convert(tr *icc.Pipeline, image_any image.Image) (ans image.Image, err error) {
|
||||
t := tr.Transform
|
||||
b := image_any.Bounds()
|
||||
width, height := b.Dx(), b.Dy()
|
||||
ans = image_any
|
||||
var f func(start, limit int)
|
||||
switch img := image_any.(type) {
|
||||
case *NRGB:
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
row := img.Pix[img.Stride*y:]
|
||||
_ = row[3*(width-1)]
|
||||
for range width {
|
||||
r := row[0:3:3]
|
||||
fr, fg, fb := t(f8(r[0]), f8(r[1]), f8(r[2]))
|
||||
r[0], r[1], r[2] = f8i(fr), f8i(fg), f8i(fb)
|
||||
row = row[3:]
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.NRGBA:
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
row := img.Pix[img.Stride*y:]
|
||||
_ = row[4*(width-1)]
|
||||
for range width {
|
||||
r := row[0:3:3]
|
||||
fr, fg, fb := t(f8(r[0]), f8(r[1]), f8(r[2]))
|
||||
r[0], r[1], r[2] = f8i(fr), f8i(fg), f8i(fb)
|
||||
row = row[4:]
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.NRGBA64:
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
row := img.Pix[img.Stride*y:]
|
||||
_ = row[8*(width-1)]
|
||||
for range width {
|
||||
s := row[0:8:8]
|
||||
fr := f16(uint16(s[0])<<8 | uint16(s[1]))
|
||||
fg := f16(uint16(s[2])<<8 | uint16(s[3]))
|
||||
fb := f16(uint16(s[4])<<8 | uint16(s[5]))
|
||||
fr, fg, fb = t(fr, fg, fb)
|
||||
r, g, b := f16i(fr), f16i(fg), f16i(fb)
|
||||
s[0], s[1] = uint8(r>>8), uint8(r)
|
||||
s[2], s[3] = uint8(g>>8), uint8(g)
|
||||
s[4], s[5] = uint8(b>>8), uint8(b)
|
||||
row = row[8:]
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.RGBA:
|
||||
d := image.NewNRGBA(b)
|
||||
ans = d
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
row := img.Pix[img.Stride*y:]
|
||||
drow := d.Pix[d.Stride*y:]
|
||||
_ = row[4*(width-1)]
|
||||
_ = drow[4*(width-1)]
|
||||
for range width {
|
||||
r := row[0:4:4]
|
||||
dr := drow[0:4:4]
|
||||
dr[3] = r[3]
|
||||
if a := row[3]; a != 0 {
|
||||
fr, fg, fb := t(unpremultiply8(r[0], a), unpremultiply8(r[1], a), unpremultiply8(r[2], a))
|
||||
dr[0], dr[1], dr[2] = f8i(fr), f8i(fg), f8i(fb)
|
||||
}
|
||||
row = row[4:]
|
||||
drow = drow[4:]
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.RGBA64:
|
||||
d := image.NewNRGBA64(b)
|
||||
ans = d
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
row := img.Pix[img.Stride*y:]
|
||||
drow := d.Pix[d.Stride*y:]
|
||||
_ = row[8*(width-1)]
|
||||
_ = drow[8*(width-1)]
|
||||
for range width {
|
||||
s, dr := row[0:8:8], drow[0:8:8]
|
||||
dr[6], dr[7] = s[6], s[7]
|
||||
a := uint32(s[6])<<8 | uint32(s[7])
|
||||
if a != 0 {
|
||||
fr := unpremultiply((uint32(s[0])<<8 | uint32(s[1])), a)
|
||||
fg := unpremultiply((uint32(s[2])<<8 | uint32(s[3])), a)
|
||||
fb := unpremultiply((uint32(s[4])<<8 | uint32(s[5])), a)
|
||||
fr, fg, fb = t(fr, fg, fb)
|
||||
r, g, b := f16i(fr), f16i(fg), f16i(fb)
|
||||
dr[0], dr[1] = uint8(r>>8), uint8(r)
|
||||
dr[2], dr[3] = uint8(g>>8), uint8(g)
|
||||
dr[4], dr[5] = uint8(b>>8), uint8(b)
|
||||
}
|
||||
row = row[8:]
|
||||
drow = drow[8:]
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.Paletted:
|
||||
for i, c := range img.Palette {
|
||||
r, g, b, a := c.RGBA()
|
||||
if a != 0 {
|
||||
fr, fg, fb := unpremultiply(r, a), unpremultiply(g, a), unpremultiply(b, a)
|
||||
fr, fg, fb = t(fr, fg, fb)
|
||||
img.Palette[i] = color.NRGBA64{R: f16i(fr), G: f16i(fg), B: f16i(fb), A: uint16(a)}
|
||||
}
|
||||
}
|
||||
return
|
||||
case *image.CMYK:
|
||||
g := tr.TransformGeneral
|
||||
d := nrgb.NewNRGB(b)
|
||||
ans = d
|
||||
f = func(start, limit int) {
|
||||
var inp, outp [4]float64
|
||||
i, o := inp[:], outp[:]
|
||||
for y := start; y < limit; y++ {
|
||||
row := img.Pix[img.Stride*y:]
|
||||
drow := d.Pix[d.Stride*y:]
|
||||
_ = row[4*(width-1)]
|
||||
_ = drow[3*(width-1)]
|
||||
for range width {
|
||||
r := row[0:4:4]
|
||||
inp[0], inp[1], inp[2], inp[3] = f8(r[0]), f8(r[1]), f8(r[2]), f8(r[3])
|
||||
g(o, i)
|
||||
r = drow[0:3:3]
|
||||
r[0], r[1], r[2] = f8i(outp[0]), f8i(outp[1]), f8i(outp[2])
|
||||
row = row[4:]
|
||||
drow = drow[3:]
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.YCbCr:
|
||||
d := nrgb.NewNRGB(b)
|
||||
ans = d
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
ybase := y * img.YStride
|
||||
row := d.Pix[d.Stride*y:]
|
||||
yy := y + b.Min.Y
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
iy := ybase + (x - b.Min.X)
|
||||
ic := img.COffset(x, yy)
|
||||
// We use this rather than color.YCbCrToRGB for greater accuracy
|
||||
r, g, bb, _ := color.YCbCr{img.Y[iy], img.Cb[ic], img.Cr[ic]}.RGBA()
|
||||
fr, fg, fb := t(f16(uint16(r)), f16(uint16(g)), f16(uint16(bb)))
|
||||
rr := row[0:3:3]
|
||||
rr[0], rr[1], rr[2] = f8i(fr), f8i(fg), f8i(fb)
|
||||
row = row[3:]
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.NYCbCrA:
|
||||
d := image.NewNRGBA(b)
|
||||
ans = d
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
ybase := y * img.YStride
|
||||
row := d.Pix[d.Stride*y:]
|
||||
yy := y + b.Min.Y
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
rr := row[0:4:4]
|
||||
rr[3] = img.A[img.AOffset(x, yy)]
|
||||
if rr[3] != 0 {
|
||||
iy := ybase + (x - b.Min.X)
|
||||
ic := img.COffset(x, yy)
|
||||
// We use this rather than color.YCbCrToRGB for greater accuracy
|
||||
r, g, bb, _ := color.YCbCr{img.Y[iy], img.Cb[ic], img.Cr[ic]}.RGBA()
|
||||
fr, fg, fb := t(f16(uint16(r)), f16(uint16(g)), f16(uint16(bb)))
|
||||
rr[0], rr[1], rr[2] = f8i(fr), f8i(fg), f8i(fb)
|
||||
row = row[4:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case draw.Image:
|
||||
f = func(start, limit int) {
|
||||
for y := b.Min.Y + start; y < b.Min.Y+limit; y++ {
|
||||
for x := b.Min.X; x < b.Max.X; x++ {
|
||||
r16, g16, b16, a16 := img.At(x, y).RGBA()
|
||||
if a16 != 0 {
|
||||
fr, fg, fb := unpremultiply(r16, a16), unpremultiply(g16, a16), unpremultiply(b16, a16)
|
||||
fr, fg, fb = t(fr, fg, fb)
|
||||
img.Set(x, y, &color.NRGBA64{R: f16i(fr), G: f16i(fg), B: f16i(fb)})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
d := image.NewNRGBA64(b)
|
||||
ans = d
|
||||
f = func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
row := d.Pix[d.Stride*y:]
|
||||
for x := range width {
|
||||
r16, g16, b16, a16 := img.At(x+b.Min.X, y+b.Min.Y).RGBA()
|
||||
if a16 != 0 {
|
||||
fr, fg, fb := unpremultiply(r16, a16), unpremultiply(g16, a16), unpremultiply(b16, a16)
|
||||
fr, fg, fb = t(fr, fg, fb)
|
||||
r, g, b := f16i(fr), f16i(fg), f16i(fb)
|
||||
s := row[0:8:8]
|
||||
row = row[8:]
|
||||
s[0], s[1] = uint8(r>>8), uint8(r)
|
||||
s[2], s[3] = uint8(g>>8), uint8(g)
|
||||
s[4], s[5] = uint8(b>>8), uint8(b)
|
||||
s[6] = uint8(a16 >> 8)
|
||||
s[7] = uint8(a16)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
err = parallel.Run_in_parallel_over_range(0, f, 0, height)
|
||||
return
|
||||
}
|
||||
29
vendor/github.com/kovidgoyal/imaging/color_space_conversion_api.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
package imaging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"image"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta/icc"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
// ConvertToSRGB converts the image to sRGB based on the supplied ICC color profile.
// The result may be the original image unmodified (if no color conversion was
// needed), the original image modified in place, or a new image (when the
// original image's format cannot be modified in place).
|
||||
func ConvertToSRGB(p *icc.Profile, intent icc.RenderingIntent, use_blackpoint_compensation bool, image_any image.Image) (ans image.Image, err error) {
|
||||
if p.IsSRGB() {
|
||||
return image_any, nil
|
||||
}
|
||||
num_channels := 3
|
||||
if _, is_cmyk := image_any.(*image.CMYK); is_cmyk {
|
||||
num_channels = 4
|
||||
}
|
||||
tr, err := p.CreateTransformerToSRGB(intent, use_blackpoint_compensation, num_channels, true, true, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return convert(tr, image_any)
|
||||
}
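// Illustrative usage sketch (not part of the original source): given an
// icc.Profile p and a rendering intent already extracted from image metadata,
//
//	converted, err := ConvertToSRGB(p, intent, true, img)
//
// returns img unchanged when the profile is already sRGB, and otherwise a
// color-converted image (possibly a newly allocated one, depending on the
// pixel format of img).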
|
||||
468
vendor/github.com/kovidgoyal/imaging/colorconv/colorconv.go
generated
vendored
Normal file
@@ -0,0 +1,468 @@
|
||||
package colorconv
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
// This package converts CIE L*a*b* colors defined relative to the D50 white point
|
||||
// into sRGB values relative to D65. It performs chromatic
|
||||
// adaptation (Bradford), fuses linear matrix transforms where possible for speed,
|
||||
// and does a simple perceptually-minded gamut mapping by scaling chroma (a,b)
|
||||
// down towards zero until the resulting sRGB is inside the [0,1] cube.
|
||||
//
|
||||
// Notes:
|
||||
// - Input L,a,b are the usual CIELAB values (L in [0,100]; a and b are signed and typically fall roughly within ±128).
|
||||
// - Returned sRGB values are in [0,1]. If gamut mapping fails (rare), values
|
||||
// will be clipped to [0,1] as a fallback.
|
||||
//
|
||||
// The code fuses the chromatic adaptation and XYZ->linear-sRGB matrices into a
|
||||
// single 3x3 matrix so that the only linear operation after Lab->XYZ is a single
|
||||
// matrix multiply.
|
||||
|
||||
type Vec3 [3]float64
|
||||
type Mat3 [3][3]float64
|
||||
|
||||
var whiteD65 = Vec3{0.95047, 1.00000, 1.08883}
|
||||
|
||||
func (m *Mat3) String() string {
|
||||
return fmt.Sprintf("Matrix3{ %.6v %.6v %.6v }", m[0], m[1], m[2])
|
||||
}
|
||||
|
||||
// Standard reference whites (CIE XYZ) normalized so Y = 1.0
|
||||
// Note that WhiteD50 uses the Z value from the ICC spec rather than the CIE spec.
|
||||
var WhiteD50 = Vec3{0.96422, 1.00000, 0.82491}
|
||||
|
||||
type ConvertColor struct {
|
||||
whitepoint Vec3
|
||||
// Precomputed combined matrix from XYZ(whitepoint) directly to linear sRGB (D65).
|
||||
// Combined = srgbFromXYZ * adaptMatrix (where adaptMatrix adapts XYZ D50 -> XYZ D65).
|
||||
combined_XYZ_to_linear_SRGB Mat3
|
||||
}
|
||||
|
||||
func (c ConvertColor) String() string {
|
||||
return fmt.Sprintf("{whitepoint:%.6v matrix:%.6v}", c.whitepoint, c.combined_XYZ_to_linear_SRGB)
|
||||
}
|
||||
|
||||
func (cc *ConvertColor) AddPreviousMatrix(a, b, c [3]float64) {
|
||||
prev := Mat3{a, b, c}
|
||||
cc.combined_XYZ_to_linear_SRGB = mulMat3(cc.combined_XYZ_to_linear_SRGB, prev)
|
||||
}
|
||||
|
||||
func NewConvertColor(whitepoint_x, whitepoint_y, whitepoint_z, scale float64) (ans *ConvertColor) {
|
||||
ans = &ConvertColor{whitepoint: Vec3{whitepoint_x, whitepoint_y, whitepoint_z}}
|
||||
adapt := chromaticAdaptationMatrix(ans.whitepoint, whiteD65)
|
||||
// sRGB (linear) transform matrix from CIE XYZ (D65)
|
||||
var srgbFromXYZ = Mat3{
|
||||
{3.2406 * scale, -1.5372 * scale, -0.4986 * scale},
|
||||
{-0.9689 * scale, 1.8758 * scale, 0.0415 * scale},
|
||||
{0.0557 * scale, -0.2040 * scale, 1.0570 * scale},
|
||||
}
|
||||
ans.combined_XYZ_to_linear_SRGB = mulMat3(srgbFromXYZ, adapt)
|
||||
return
|
||||
}
|
||||
|
||||
func NewStandardConvertColor() (ans *ConvertColor) {
|
||||
return NewConvertColor(WhiteD50[0], WhiteD50[1], WhiteD50[2], 1)
|
||||
}
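// Illustrative usage sketch (not part of the original source): convert a D50
// CIELAB color to sRGB, with chroma-scaling gamut mapping applied if needed:
//
//	cc := NewStandardConvertColor()
//	r, g, b := cc.LabToSRGB(50, 20, -30)
//
// The returned components are in [0,1]; scale by 255 for 8-bit channels.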
|
||||
|
||||
// LabToSRGB converts a Lab color (at the specified whitepoint) into sRGB (D65) with gamut mapping.
|
||||
// Returned components are in [0,1].
|
||||
func (c *ConvertColor) LabToSRGB(L, a, b float64) (r, g, bl float64) {
|
||||
// fast path: try direct conversion and only do gamut mapping if out of gamut
|
||||
r0, g0, b0 := c.LabToSRGBNoGamutMap(L, a, b)
|
||||
if inGamut(r0, g0, b0) {
|
||||
return r0, g0, b0
|
||||
}
|
||||
// gamut map by scaling chroma (a,b) toward 0 while keeping L constant.
|
||||
rm, gm, bm := c.gamutMapChromaScale(L, a, b)
|
||||
return rm, gm, bm
|
||||
}
|
||||
|
||||
// LabToSRGBNoGamutMap converts Lab(whitepoint) to sRGB(D65) without doing any gamut mapping.
|
||||
// Values may be out of [0,1].
|
||||
func (c *ConvertColor) LabToSRGBNoGamutMap(L, a, b float64) (r, g, bl float64) {
|
||||
rLin, gLin, bLin := c.LabToLinearRGB(L, a, b)
|
||||
r = linearToSRGBComp(rLin)
|
||||
g = linearToSRGBComp(gLin)
|
||||
bl = linearToSRGBComp(bLin)
|
||||
return
|
||||
}
|
||||
|
||||
// LabToSRGBClamp converts Lab(whitepoint) to sRGB(D65) without gamut mapping, clamping each component to [0,1].
|
||||
func (c *ConvertColor) LabToSRGBClamp(L, a, b float64) (r, g, bl float64) {
|
||||
rLin, gLin, bLin := c.LabToLinearRGB(L, a, b)
|
||||
r = clamp01(linearToSRGBComp(rLin))
|
||||
g = clamp01(linearToSRGBComp(gLin))
|
||||
bl = clamp01(linearToSRGBComp(bLin))
|
||||
return
|
||||
}
|
||||
|
||||
// LabToLinearRGB converts Lab to linear RGB (not gamma-corrected), but still
|
||||
// with chromatic adaptation to D65 fused into the matrix. Output is linear sRGB.
|
||||
func (c *ConvertColor) LabToLinearRGB(L, a, b float64) (r, g, bl float64) {
|
||||
X, Y, Z := c.LabToXYZ(L, a, b)
|
||||
rv, gv, bv := mulMat3Vec(c.combined_XYZ_to_linear_SRGB, Vec3{X, Y, Z})
|
||||
return rv, gv, bv
|
||||
}
|
||||
|
||||
// XYZToLinearRGB converts XYZ expressed relative to the specified whitepoint
|
||||
// directly to linear sRGB values (D65) using the precomputed fused matrix.
|
||||
// The output is linear RGB and may be outside the [0,1] range.
|
||||
func (c *ConvertColor) XYZToLinearRGB(X, Y, Z float64) (r, g, b float64) {
|
||||
r, g, b = mulMat3Vec(c.combined_XYZ_to_linear_SRGB, Vec3{X, Y, Z})
|
||||
return
|
||||
}
|
||||
|
||||
func (c *ConvertColor) Matrix() Mat3 {
|
||||
return c.combined_XYZ_to_linear_SRGB
|
||||
}
|
||||
|
||||
// XYZToSRGBNoGamutMap converts XYZ expressed relative to the whitepoint directly to
|
||||
// gamma-corrected sRGB values (D65). The outputs are clamped to [0,1].
|
||||
// This function re-uses the precomputed combined matrix and the existing companding function.
|
||||
func (c *ConvertColor) XYZToSRGBNoGamutMap(X, Y, Z float64) (r, g, b float64) {
|
||||
rl, gl, bl := c.XYZToLinearRGB(X, Y, Z)
|
||||
// Apply sRGB companding and clamp
|
||||
r = clamp01(linearToSRGBComp(rl))
|
||||
g = clamp01(linearToSRGBComp(gl))
|
||||
b = clamp01(linearToSRGBComp(bl))
|
||||
return
|
||||
}
|
||||
|
||||
// If you need the non-clamped gamma-corrected values (for checking out-of-gamut)
|
||||
// you can use this helper which only compands but doesn't clamp.
|
||||
func (c *ConvertColor) XYZToSRGBNoClamp(X, Y, Z float64) (r, g, b float64) {
|
||||
rl, gl, bl := c.XYZToLinearRGB(X, Y, Z)
|
||||
r = linearToSRGBComp(rl)
|
||||
g = linearToSRGBComp(gl)
|
||||
b = linearToSRGBComp(bl)
|
||||
return
|
||||
}
|
||||
|
||||
// XYZToSRGB converts XYZ (whitepoint) to sRGB (D65) using the Lab-projection
|
||||
// + chroma-scaling gamut mapping. It projects XYZ into CIELAB (whitepoint), reuses the
|
||||
// existing LabToSRGB (which performs chroma-scaling if needed), and returns final sRGB.
|
||||
func (c *ConvertColor) XYZToSRGB(X, Y, Z float64) (r, g, b float64) {
|
||||
r, g, b = c.XYZToSRGBNoClamp(X, Y, Z)
|
||||
if inGamut(r, g, b) {
|
||||
return
|
||||
}
|
||||
L, a, bb := c.XYZToLab(X, Y, Z)
|
||||
return c.LabToSRGB(L, a, bb)
|
||||
}

// Helpers: core conversions

func finv(t float64) float64 {
    const delta = 6.0 / 29.0
    if t > delta {
        return t * t * t
    }
    // when t <= delta: 3*delta^2*(t - 4/29)
    return 3 * delta * delta * (t - 4.0/29.0)
}

// LabToXYZ converts Lab (whitepoint) to CIE XYZ values relative to the whitepoint (Y=1).
func (c *ConvertColor) LabToXYZ(L, a, b float64) (X, Y, Z float64) {
    // Inverse of the CIELAB f function
    var fy = (L + 16.0) / 116.0
    var fx = fy + (a / 500.0)
    var fz = fy - (b / 200.0)

    xr := finv(fx)
    yr := finv(fy)
    zr := finv(fz)

    X = xr * c.whitepoint[0]
    Y = yr * c.whitepoint[1]
    Z = zr * c.whitepoint[2]
    return
}

func ff(t float64) float64 {
    const delta = 6.0 / 29.0
    if t > delta*delta*delta {
        return math.Cbrt(t)
    }
    // t <= delta^3
    return t/(3*delta*delta) + 4.0/29.0
}

func xyz_to_lab(wt Vec3, X, Y, Z float64) (L, a, b float64) {
    // Normalize by white
    xr := X / wt[0]
    yr := Y / wt[1]
    zr := Z / wt[2]

    fx := ff(xr)
    fy := ff(yr)
    fz := ff(zr)

    L = 116.0*fy - 16.0
    a = 500.0 * (fx - fy)
    b = 200.0 * (fy - fz)
    return
}

// XYZToLab converts XYZ (relative to whitepoint, Y=1) into CIELAB (whitepoint).
func (c *ConvertColor) XYZToLab(X, Y, Z float64) (L, a, b float64) {
    return xyz_to_lab(c.whitepoint, X, Y, Z)
}

// linearToSRGBComp applies the sRGB (gamma) companding function to a linear component.
func linearToSRGBComp(c float64) float64 {
    switch {
    case c <= 0.0031308:
        // clip small negative values for stability
        if c < 0 && c > -1./math.MaxUint16 {
            return 0
        }
        return 12.92 * c
    default:
        return 1.055*math.Pow(c, 1.0/2.4) - 0.055
    }
}

// Convert sRGB to linear light
func srgbToLinear(c float64) float64 {
    c = clamp01(c)
    // sRGB transfer function inverse
    if c <= 0.04045 {
        return c / 12.92
    }
    return math.Pow((c+0.055)/1.055, 2.4)
}

// Converts linear RGB to CIE XYZ using sRGB D65 matrix.
// Input r,g,b must be linear-light (not gamma-encoded).
func rgbToXYZ(r, g, b float64) (x, y, z float64) {
    // sRGB (linear) to XYZ (D65), matrix from IEC 61966-2-1
    x = 0.4124564*r + 0.3575761*g + 0.1804375*b
    y = 0.2126729*r + 0.7151522*g + 0.0721750*b
    z = 0.0193339*r + 0.1191920*g + 0.9503041*b
    return
}

func SrgbToLab(r, g, b float64) (L, a, B float64) {
    // convert gamma-encoded sRGB to linear
    r = srgbToLinear(r)
    g = srgbToLinear(g)
    b = srgbToLinear(b)
    x, y, z := rgbToXYZ(r, g, b)
    return xyz_to_lab(whiteD65, x, y, z)
}
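
// Worked example (a sketch): a mid sRGB gray, SrgbToLab(0.5, 0.5, 0.5), comes
// out neutral with a ≈ 0, b ≈ 0 and a lightness of roughly L ≈ 53, because the
// sRGB transfer curve maps 0.5 to about 21% linear luminance.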

// h' (in degrees 0..360)
func hp(aPrime, b float64) float64 {
    if aPrime == 0 && b == 0 {
        return 0.0
    }
    angle := math.Atan2(b, aPrime) * (180.0 / math.Pi)
    if angle < 0 {
        angle += 360.0
    }
    return angle
}

// DeltaE2000 computes the CIEDE2000 color-difference between two Lab colors.
// Implementation follows the formula from Sharma et al., 2005.
func DeltaE2000(L1, a1, b1, L2, a2, b2 float64) float64 {
    // Weighting factors
    kL, kC, kH := 1.0, 1.0, 1.0

    // Step 1: Compute C' and h'
    C1 := math.Hypot(a1, b1)
    C2 := math.Hypot(a2, b2)
    // mean C'
    Cbar := (C1 + C2) / 2.0

    // compute G
    Cbar7 := math.Pow(Cbar, 7)
    G := 0.5 * (1 - math.Sqrt(Cbar7/(Cbar7+math.Pow(25.0, 7))))

    // a' values
    ap1 := (1 + G) * a1
    ap2 := (1 + G) * a2

    // C' recalculated
    C1p := math.Hypot(ap1, b1)
    C2p := math.Hypot(ap2, b2)

    h1p := hp(ap1, b1)
    h2p := hp(ap2, b2)

    // delta L'
    dLp := L2 - L1
    // delta C'
    dCp := C2p - C1p

    // delta h'
    var dhp float64
    if C1p*C2p == 0 {
        dhp = 0
    } else {
        diff := h2p - h1p
        if math.Abs(diff) <= 180 {
            dhp = diff
        } else if diff > 180 {
            dhp = diff - 360
        } else {
            dhp = diff + 360
        }
    }
    // convert to radians for the formula
    dHp := 2 * math.Sqrt(C1p*C2p) * math.Sin((dhp*math.Pi/180.0)/2.0)

    // average L', C', h'
    LpBar := (L1 + L2) / 2.0
    CpBar := (C1p + C2p) / 2.0

    var hpBar float64
    if C1p*C2p == 0 {
        hpBar = h1p + h2p
    } else {
        diff := math.Abs(h1p - h2p)
        if diff <= 180 {
            hpBar = (h1p + h2p) / 2.0
        } else if (h1p + h2p) < 360 {
            hpBar = (h1p + h2p + 360) / 2.0
        } else {
            hpBar = (h1p + h2p - 360) / 2.0
        }
    }

    // T
    T := 1 - 0.17*math.Cos((hpBar-30)*math.Pi/180.0) +
        0.24*math.Cos((2*hpBar)*math.Pi/180.0) +
        0.32*math.Cos((3*hpBar+6)*math.Pi/180.0) -
        0.20*math.Cos((4*hpBar-63)*math.Pi/180.0)

    // delta theta
    dTheta := 30 * math.Exp(-((hpBar-275)/25)*((hpBar-275)/25))
    // R_C
    Rc := 2 * math.Sqrt(math.Pow(CpBar, 7)/(math.Pow(CpBar, 7)+math.Pow(25.0, 7)))
    // S_L, S_C, S_H
    Sl := 1 + ((0.015 * (LpBar - 50) * (LpBar - 50)) / math.Sqrt(20+((LpBar-50)*(LpBar-50))))
    Sc := 1 + 0.045*CpBar
    Sh := 1 + 0.015*CpBar*T
    // R_T
    RT := -math.Sin(2*dTheta*math.Pi/180.0) * Rc

    // finally
    dL := dLp / (kL * Sl)
    dC := dCp / (kC * Sc)
    dH := dHp / (kH * Sh)

    return math.Sqrt(dL*dL + dC*dC + dH*dH + RT*dC*dH)
}

// DeltaEBetweenSrgb takes two sRGB colors (0..1) and returns the Delta E (CIEDE2000).
func DeltaEBetweenSrgb(r1, g1, b1, r2, g2, b2 float64) float64 {
    L1, a1, b1 := SrgbToLab(r1, g1, b1)
    L2, a2, b2 := SrgbToLab(r2, g2, b2)
    return DeltaE2000(L1, a1, b1, L2, a2, b2)
}
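
// Example (a sketch; the ~2.3 "just noticeable difference" threshold is a
// common rule of thumb, not something defined by this package):
//
//    d := DeltaEBetweenSrgb(1, 0, 0, 0.95, 0.05, 0.05)
//    if d < 2.3 {
//        // the two reds are close enough to be treated as the same color
//    }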

// inGamut checks whether r,g,b are all inside [0,1]
func inGamut(r, g, b float64) bool {
    return 0 <= r && r <= 1 && 0 <= g && g <= 1 && 0 <= b && b <= 1
}

// gamutMapChromaScale reduces chroma (a,b) by scaling factor s in [0,1] to bring the
// color into gamut. Binary search is used to find the maximum s such that the color
// is in gamut. L is preserved.
func (c *ConvertColor) gamutMapChromaScale(L, a, b float64) (r, g, bl float64) {
    // If a==0 && b==0 we can't scale; just clip after conversion
    if a == 0 && b == 0 {
        r0, g0, b0 := c.LabToSRGBNoGamutMap(L, a, b)
        return clamp01(r0), clamp01(g0), clamp01(b0)
    }
    // Binary search scale factor in [0,1]
    lo := 0.0
    hi := 1.0
    var mid float64
    var foundR, foundG, foundB float64
    // If even fully desaturated (a=b=0) is out of gamut, we'll clip
    for range 24 {
        mid = (lo + hi) / 2.0
        a2 := a * mid
        b2 := b * mid
        r0, g0, b0 := c.LabToSRGBNoGamutMap(L, a2, b2)
        if inGamut(r0, g0, b0) {
            foundR, foundG, foundB = r0, g0, b0
            // can try to keep more chroma
            lo = mid
        } else {
            hi = mid
        }
    }
    // If the binary search never found an in-gamut result, try a = b = 0
    if !(inGamut(foundR, foundG, foundB)) {
        r0, g0, b0 := c.LabToSRGBNoGamutMap(L, 0, 0)
        // if still out-of-gamut (very unlikely), clip
        return clamp01(r0), clamp01(g0), clamp01(b0)
    }
    return clamp01(foundR), clamp01(foundG), clamp01(foundB)
}
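
// Note: 24 bisection steps pin the chroma scale factor down to within 2^-24
// (about 6e-8) of the largest in-gamut value, well below what an 8- or 16-bit
// output channel can resolve.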

// clamp01 clamps value to [0,1]
func clamp01(x float64) float64 {
    return max(0, min(x, 1))
}

// Matrix & vector utilities

func mulMat3(a, b Mat3) Mat3 {
    var out Mat3
    for i := range 3 {
        for j := range 3 {
            sum := 0.0
            for k := range 3 {
                sum += a[i][k] * b[k][j]
            }
            out[i][j] = sum
        }
    }
    return out
}

func mulMat3Vec(m Mat3, v Vec3) (x, y, z float64) {
    x = m[0][0]*v[0] + m[0][1]*v[1] + m[0][2]*v[2]
    y = m[1][0]*v[0] + m[1][1]*v[1] + m[1][2]*v[2]
    z = m[2][0]*v[0] + m[2][1]*v[1] + m[2][2]*v[2]
    return
}

// chromaticAdaptationMatrix constructs a 3x3 matrix that adapts XYZ values
// from sourceWhite to targetWhite using the Bradford method.
func chromaticAdaptationMatrix(sourceWhite, targetWhite Vec3) Mat3 {
    // Bradford transform matrices (forward and inverse)
    var (
        bradford = Mat3{
            {0.8951, 0.2664, -0.1614},
            {-0.7502, 1.7135, 0.0367},
            {0.0389, -0.0685, 1.0296},
        }
        bradford_inverted = Mat3{
            {0.9869929054667121, -0.1470542564209901, 0.1599626516637312},
            {0.4323052697233945, 0.5183602715367774, 0.049291228212855594},
            {-0.008528664575177326, 0.04004282165408486, 0.96848669578755},
        }
    )

    // Convert whites to LMS using Bradford
    srcL, srcM, srcS := mulMat3Vec(bradford, sourceWhite)
    tgtL, tgtM, tgtS := mulMat3Vec(bradford, targetWhite)
    // Build diag matrix in-between
    diag := Mat3{
        {tgtL / srcL, 0, 0},
        {0, tgtM / srcM, 0},
        {0, 0, tgtS / srcS},
    }
    // adapt = invBradford * diag * bradford
    tmp := mulMat3(diag, bradford)           // diag*B
    adapt := mulMat3(bradford_inverted, tmp) // invB * (diag*B)
    return adapt
}
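
// Example (a sketch; the D50 and D65 whitepoint vectors are the usual 2°
// observer values and are assumptions, not constants declared in this file):
//
//    d50 := Vec3{0.96422, 1.0, 0.82521}
//    d65 := Vec3{0.95047, 1.0, 1.08883}
//    adapt := chromaticAdaptationMatrix(d50, d65)
//    X, Y, Z := mulMat3Vec(adapt, Vec3{0.3, 0.4, 0.2}) // D50-relative XYZ -> D65-relative XYZ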
2
vendor/github.com/kovidgoyal/imaging/convolution.go
generated
vendored
@@ -72,7 +72,7 @@ func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *imag

    if err := run_in_parallel_over_range(0, func(start, limit int) {
        for y := start; y < limit; y++ {
            for x := 0; x < w; x++ {
            for x := range w {
                var r, g, b float64
                for _, c := range coefs {
                    ix := x + c.x

18
vendor/github.com/kovidgoyal/imaging/custom-lcms.sh
generated
vendored
Normal file
@@ -0,0 +1,18 @@
#!/bin/bash
#
# custom-lcms.sh
# Copyright (C) 2025 Kovid Goyal <kovid at kovidgoyal.net>
#
# Distributed under terms of the MIT license.
#
dist=`pwd`/lcms/dist
libdir="$dist/lib"
cd lcms
if [[ ! -d "$dist" ]]; then
    ./configure --prefix="$dist" || exit 1
fi
echo "Building lcms..." && \
make -j8 >/dev/null&& make install >/dev/null&& cd .. && \
echo "lcms in -- $libdir" && \
CGO_LDFLAGS="-L$libdir" go test -tags lcms2cgo -run Develop -v -c ./prism && \
LD_LIBRARY_PATH="$libdir" exec ./prism.test -test.run Develop
45
vendor/github.com/kovidgoyal/imaging/effects.go
generated
vendored
@@ -3,6 +3,8 @@ package imaging
|
||||
import (
|
||||
"image"
|
||||
"math"
|
||||
|
||||
"github.com/kovidgoyal/imaging/nrgba"
|
||||
)
|
||||
|
||||
func gaussianBlurKernel(x, sigma float64) float64 {
|
||||
@@ -31,21 +33,22 @@ func Blur(img image.Image, sigma float64) *image.NRGBA {
|
||||
}
|
||||
|
||||
func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
radius := len(kernel) - 1
|
||||
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, src.w*4)
|
||||
scanLine := make([]uint8, w*4)
|
||||
scanLineF := make([]float64, len(scanLine))
|
||||
for y := start; y < limit; y++ {
|
||||
src.Scan(0, y, src.w, y+1, scanLine)
|
||||
src.Scan(0, y, w, y+1, scanLine)
|
||||
for i, v := range scanLine {
|
||||
scanLineF[i] = float64(v)
|
||||
}
|
||||
for x := 0; x < src.w; x++ {
|
||||
for x := range w {
|
||||
minv := max(0, x-radius)
|
||||
maxv := min(x+radius, src.w-1)
|
||||
maxv := min(x+radius, w-1)
|
||||
var r, g, b, a, wsum float64
|
||||
for ix := minv; ix <= maxv; ix++ {
|
||||
i := ix * 4
|
||||
@@ -69,7 +72,7 @@ func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -77,21 +80,22 @@ func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
|
||||
}
|
||||
|
||||
func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
radius := len(kernel) - 1
|
||||
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, src.h*4)
|
||||
scanLine := make([]uint8, h*4)
|
||||
scanLineF := make([]float64, len(scanLine))
|
||||
for x := start; x < limit; x++ {
|
||||
src.Scan(x, 0, x+1, src.h, scanLine)
|
||||
src.Scan(x, 0, x+1, h, scanLine)
|
||||
for i, v := range scanLine {
|
||||
scanLineF[i] = float64(v)
|
||||
}
|
||||
for y := 0; y < src.h; y++ {
|
||||
for y := range h {
|
||||
minv := max(0, y-radius)
|
||||
maxv := min(y+radius, src.h-1)
|
||||
maxv := min(y+radius, h-1)
|
||||
var r, g, b, a, wsum float64
|
||||
for iy := minv; iy <= maxv; iy++ {
|
||||
i := iy * 4
|
||||
@@ -115,7 +119,7 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 0, src.w); err != nil {
|
||||
}, 0, w); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -133,16 +137,17 @@ func Sharpen(img image.Image, sigma float64) *image.NRGBA {
|
||||
return Clone(img)
|
||||
}
|
||||
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
blurred := Blur(img, sigma)
|
||||
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, src.w*4)
|
||||
scanLine := make([]uint8, w*4)
|
||||
for y := start; y < limit; y++ {
|
||||
src.Scan(0, y, src.w, y+1, scanLine)
|
||||
src.Scan(0, y, w, y+1, scanLine)
|
||||
j := y * dst.Stride
|
||||
for i := 0; i < src.w*4; i++ {
|
||||
for i := 0; i < w*4; i++ {
|
||||
val := int(scanLine[i])<<1 - int(blurred.Pix[j])
|
||||
if val < 0 {
|
||||
val = 0
|
||||
@@ -153,7 +158,7 @@ func Sharpen(img image.Image, sigma float64) *image.NRGBA {
|
||||
j++
|
||||
}
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
|
||||
15
vendor/github.com/kovidgoyal/imaging/histogram.go
generated
vendored
@@ -3,6 +3,8 @@ package imaging
import (
    "image"
    "sync"

    "github.com/kovidgoyal/imaging/nrgba"
)

// Histogram returns a normalized histogram of an image.
@@ -14,19 +16,20 @@ func Histogram(img image.Image) [256]float64 {
    var histogram [256]float64
    var total float64

    src := newScanner(img)
    if src.w == 0 || src.h == 0 {
    w, h := img.Bounds().Dx(), img.Bounds().Dy()
    if w == 0 || h == 0 {
        return histogram
    }
    src := nrgba.NewNRGBAScanner(img)

    if err := run_in_parallel_over_range(0, func(start, limit int) {
        var tmpHistogram [256]float64
        var tmpTotal float64
        scanLine := make([]uint8, src.w*4)
        scanLine := make([]uint8, w*4)
        for y := start; y < limit; y++ {
            src.Scan(0, y, src.w, y+1, scanLine)
            src.Scan(0, y, w, y+1, scanLine)
            i := 0
            for x := 0; x < src.w; x++ {
            for range w {
                s := scanLine[i : i+3 : i+3]
                r := s[0]
                g := s[1]
@@ -43,7 +46,7 @@ func Histogram(img image.Image) [256]float64 {
        }
        total += tmpTotal
        mu.Unlock()
    }, 0, src.h); err != nil {
    }, 0, h); err != nil {
        panic(err)
    }

585
vendor/github.com/kovidgoyal/imaging/io.go
generated
vendored
@@ -1,9 +1,10 @@
|
||||
package imaging
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"image"
|
||||
"image/color"
|
||||
"image/draw"
|
||||
"image/gif"
|
||||
"image/jpeg"
|
||||
@@ -11,34 +12,83 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kovidgoyal/imaging/apng"
|
||||
myjpeg "github.com/kovidgoyal/imaging/jpeg"
|
||||
"github.com/kovidgoyal/imaging/magick"
|
||||
_ "github.com/kovidgoyal/imaging/netpbm"
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/autometa"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/icc"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/tiffmeta"
|
||||
"github.com/kovidgoyal/imaging/streams"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
"github.com/kovidgoyal/imaging/webp"
|
||||
|
||||
"github.com/rwcarlsen/goexif/exif"
|
||||
exif_tiff "github.com/rwcarlsen/goexif/tiff"
|
||||
|
||||
"golang.org/x/image/bmp"
|
||||
"golang.org/x/image/tiff"
|
||||
_ "golang.org/x/image/webp"
|
||||
)
|
||||
|
||||
type fileSystem interface {
|
||||
Create(string) (io.WriteCloser, error)
|
||||
Open(string) (io.ReadCloser, error)
|
||||
Open(string) (*os.File, error)
|
||||
}
|
||||
|
||||
type localFS struct{}
|
||||
|
||||
func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) }
|
||||
func (localFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }
|
||||
func (localFS) Open(name string) (*os.File, error) { return os.Open(name) }
|
||||
|
||||
var fs fileSystem = localFS{}
|
||||
var mockable_fs fileSystem = localFS{}
|
||||
|
||||
type decodeConfig struct {
|
||||
autoOrientation bool
|
||||
type Backend int
|
||||
|
||||
const (
|
||||
GO_IMAGE Backend = iota
|
||||
MAGICK_IMAGE
|
||||
)
|
||||
|
||||
func (b Backend) String() string {
|
||||
switch b {
|
||||
case GO_IMAGE:
|
||||
return "GO_IMAGE"
|
||||
case MAGICK_IMAGE:
|
||||
return "MAGICK_IMAGE"
|
||||
}
|
||||
return fmt.Sprintf("UNKNOWN_IMAGE_TYPE_%d", b)
|
||||
}
|
||||
|
||||
var defaultDecodeConfig = decodeConfig{
|
||||
autoOrientation: true,
|
||||
type ColorSpaceType int
|
||||
|
||||
const (
|
||||
NO_CHANGE_OF_COLORSPACE ColorSpaceType = iota
|
||||
SRGB_COLORSPACE
|
||||
)
|
||||
const (
|
||||
Relative icc.RenderingIntent = icc.RelativeColorimetricRenderingIntent
|
||||
Perceptual = icc.PerceptualRenderingIntent
|
||||
Saturation = icc.SaturationRenderingIntent
|
||||
Absolute = icc.AbsoluteColorimetricRenderingIntent
|
||||
)
|
||||
|
||||
type ResizeCallbackFunction func(w, h int) (nw, nh int)
|
||||
|
||||
type decodeConfig struct {
|
||||
autoOrientation bool
|
||||
outputColorspace ColorSpaceType
|
||||
transform types.TransformType
|
||||
resize ResizeCallbackFunction
|
||||
background *color.RGBA64
|
||||
backends []Backend
|
||||
rendering_intent icc.RenderingIntent
|
||||
use_blackpoint_compensation bool
|
||||
}
|
||||
|
||||
// DecodeOption sets an optional parameter for the Decode and Open functions.
|
||||
@@ -53,39 +103,405 @@ func AutoOrientation(enabled bool) DecodeOption {
|
||||
}
|
||||
}
|
||||
|
||||
// Decode reads an image from r.
|
||||
func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
|
||||
cfg := defaultDecodeConfig
|
||||
// ColorSpace returns a DecodeOption that sets the colorspace that the
|
||||
// opened image will be in. Defaults to sRGB. If the image has an embedded ICC
|
||||
// color profile it is automatically used to convert colors to sRGB if needed.
|
||||
func ColorSpace(cs ColorSpaceType) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
c.outputColorspace = cs
|
||||
}
|
||||
}
|
||||
|
||||
// Specify a transform to perform on the image when loading it
|
||||
func Transform(t types.TransformType) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
c.transform = t
|
||||
}
|
||||
}
|
||||
|
||||
// Specify a background color onto which the image should be composed when loading it
|
||||
func Background(bg color.Color) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
nbg := color.RGBA64Model.Convert(bg).(color.RGBA64)
|
||||
c.background = &nbg
|
||||
}
|
||||
}
|
||||
|
||||
// Specify a callback function to decide a new size for the image when loading it
|
||||
func ResizeCallback(f ResizeCallbackFunction) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
c.resize = f
|
||||
}
|
||||
}
|
||||
|
||||
// Specify which backends to use to try to load the image, successively. If no backends are specified, the default
|
||||
// set are used.
|
||||
func Backends(backends ...Backend) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
c.backends = backends
|
||||
}
|
||||
}
|
||||
|
||||
// Set the rendering intent to use for ICC profile based color conversions
|
||||
func RenderingIntent(intent icc.RenderingIntent) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
c.rendering_intent = intent
|
||||
}
|
||||
}
|
||||
|
||||
// Set whether to use blackpoint compensation during ICC profile color conversions
|
||||
func BlackpointCompensation(enable bool) DecodeOption {
|
||||
return func(c *decodeConfig) {
|
||||
c.use_blackpoint_compensation = enable
|
||||
}
|
||||
}
|
||||
|
||||
func NewDecodeConfig(opts ...DecodeOption) (cfg *decodeConfig) {
|
||||
cfg = &decodeConfig{
|
||||
autoOrientation: true,
|
||||
outputColorspace: SRGB_COLORSPACE,
|
||||
transform: types.NoTransform,
|
||||
backends: []Backend{GO_IMAGE, MAGICK_IMAGE},
|
||||
// These settings match ImageMagick defaults as of v6
|
||||
rendering_intent: Relative,
|
||||
use_blackpoint_compensation: true,
|
||||
}
|
||||
default_backends := cfg.backends
|
||||
for _, option := range opts {
|
||||
option(&cfg)
|
||||
option(cfg)
|
||||
}
|
||||
if len(cfg.backends) == 0 {
|
||||
cfg.backends = default_backends
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if !cfg.autoOrientation {
|
||||
img, _, err := image.Decode(r)
|
||||
return img, err
|
||||
func (cfg *decodeConfig) magick_callback(w, h int) (ro magick.RenderOptions) {
|
||||
ro.AutoOrient = cfg.autoOrientation
|
||||
if cfg.resize != nil {
|
||||
nw, nh := cfg.resize(w, h)
|
||||
if nw != w || nh != h {
|
||||
ro.ResizeTo.X, ro.ResizeTo.Y = nw, nh
|
||||
}
|
||||
}
|
||||
md, r, err := autometa.Load(r)
|
||||
var oval orientation = orientationUnspecified
|
||||
if err == nil && md != nil && len(md.ExifData) > 6 {
|
||||
exif_data, err := exif.Decode(bytes.NewReader(md.ExifData))
|
||||
if err == nil {
|
||||
orient, err := exif_data.Get(exif.Orientation)
|
||||
if err == nil && orient != nil {
|
||||
x, err := strconv.ParseUint(orient.String(), 10, 0)
|
||||
if err == nil && x > 0 && x < 9 {
|
||||
oval = orientation(int(x))
|
||||
}
|
||||
ro.Background = cfg.background
|
||||
ro.ToSRGB = cfg.outputColorspace == SRGB_COLORSPACE
|
||||
ro.Transform = cfg.transform
|
||||
ro.RenderingIntent = cfg.rendering_intent
|
||||
ro.BlackpointCompensation = cfg.use_blackpoint_compensation
|
||||
return
|
||||
}
|
||||
|
||||
// orientation is an EXIF flag that specifies the transformation
|
||||
// that should be applied to image to display it correctly.
|
||||
type orientation int
|
||||
|
||||
const (
|
||||
orientationUnspecified = 0
|
||||
orientationNormal = 1
|
||||
orientationFlipH = 2
|
||||
orientationRotate180 = 3
|
||||
orientationFlipV = 4
|
||||
orientationTranspose = 5
|
||||
orientationRotate270 = 6
|
||||
orientationTransverse = 7
|
||||
orientationRotate90 = 8
|
||||
)
|
||||
|
||||
func fix_colors(images []*Frame, md *meta.Data, cfg *decodeConfig) error {
|
||||
var err error
|
||||
if md == nil || cfg.outputColorspace != SRGB_COLORSPACE {
|
||||
return nil
|
||||
}
|
||||
if md.CICP.IsSet && !md.CICP.IsSRGB() {
|
||||
p := md.CICP.PipelineToSRGB()
|
||||
if p == nil {
|
||||
return fmt.Errorf("cannot convert colorspace, unknown %s", md.CICP)
|
||||
}
|
||||
for _, f := range images {
|
||||
if f.Image, err = convert(p, f.Image); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
profile, err := md.ICCProfile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if profile != nil {
|
||||
for _, f := range images {
|
||||
if f.Image, err = ConvertToSRGB(profile, cfg.rendering_intent, cfg.use_blackpoint_compensation, f.Image); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
img, _, err := image.Decode(r)
|
||||
func fix_orientation(ans *Image, md *meta.Data, cfg *decodeConfig) error {
|
||||
if md == nil || !cfg.autoOrientation {
|
||||
return nil
|
||||
}
|
||||
exif_data, err := md.Exif()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var oval orientation = orientationUnspecified
|
||||
if exif_data != nil {
|
||||
orient, err := exif_data.Get(exif.Orientation)
|
||||
if err == nil && orient != nil && orient.Format() == exif_tiff.IntVal {
|
||||
if x, err := orient.Int(0); err == nil && x > 0 && x < 9 {
|
||||
oval = orientation(x)
|
||||
}
|
||||
}
|
||||
}
|
||||
switch oval {
|
||||
case orientationNormal, orientationUnspecified:
|
||||
case orientationFlipH:
|
||||
ans.FlipH()
|
||||
case orientationFlipV:
|
||||
ans.FlipV()
|
||||
case orientationRotate90:
|
||||
ans.Rotate90()
|
||||
case orientationRotate180:
|
||||
ans.Rotate180()
|
||||
case orientationRotate270:
|
||||
ans.Rotate270()
|
||||
case orientationTranspose:
|
||||
ans.Transpose()
|
||||
case orientationTransverse:
|
||||
ans.Transverse()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (img *Image) Transform(t types.TransformType) {
|
||||
switch t {
|
||||
case types.TransverseTransform:
|
||||
img.Transverse()
|
||||
case types.TransposeTransform:
|
||||
img.Transpose()
|
||||
case types.FlipHTransform:
|
||||
img.FlipH()
|
||||
case types.FlipVTransform:
|
||||
img.FlipV()
|
||||
case types.Rotate90Transform:
|
||||
img.Rotate90()
|
||||
case types.Rotate180Transform:
|
||||
img.Rotate180()
|
||||
case types.Rotate270Transform:
|
||||
img.Rotate270()
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
NoTransform = types.NoTransform
|
||||
FlipHTransform = types.FlipHTransform
|
||||
FlipVTransform = types.FlipVTransform
|
||||
Rotate90Transform = types.Rotate90Transform
|
||||
Rotate180Transform = types.Rotate180Transform
|
||||
Rotate270Transform = types.Rotate270Transform
|
||||
TransverseTransform = types.TransverseTransform
|
||||
TransposeTransform = types.TransposeTransform
|
||||
)
|
||||
|
||||
func format_from_decode_result(x string) Format {
|
||||
switch x {
|
||||
case "BMP":
|
||||
return BMP
|
||||
case "TIFF", "TIF":
|
||||
return TIFF
|
||||
}
|
||||
return UNKNOWN
|
||||
}
|
||||
|
||||
func decode_all_go(r io.Reader, md *meta.Data, cfg *decodeConfig) (ans *Image, err error) {
|
||||
defer func() {
|
||||
if ans == nil || err != nil || ans.Metadata == nil {
|
||||
return
|
||||
}
|
||||
if cfg.outputColorspace != NO_CHANGE_OF_COLORSPACE {
|
||||
if err = fix_colors(ans.Frames, ans.Metadata, cfg); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if cfg.autoOrientation {
|
||||
if err = fix_orientation(ans, ans.Metadata, cfg); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if cfg.background != nil {
|
||||
ans.PasteOntoBackground(*cfg.background)
|
||||
}
|
||||
if cfg.transform != types.NoTransform {
|
||||
ans.Transform(cfg.transform)
|
||||
}
|
||||
if cfg.resize != nil {
|
||||
w, h := ans.Bounds().Dx(), ans.Bounds().Dy()
|
||||
nw, nh := cfg.resize(w, h)
|
||||
if nw != w || nh != h {
|
||||
ans.Resize(nw, nh, Lanczos)
|
||||
}
|
||||
}
|
||||
ans.Metadata.PixelWidth = uint32(ans.Bounds().Dx())
|
||||
ans.Metadata.PixelHeight = uint32(ans.Bounds().Dy())
|
||||
}()
|
||||
if md == nil {
|
||||
img, imgf, err := image.Decode(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := meta.Data{
|
||||
PixelWidth: uint32(img.Bounds().Dx()),
|
||||
PixelHeight: uint32(img.Bounds().Dy()),
|
||||
Format: format_from_decode_result(imgf),
|
||||
BitsPerComponent: tiffmeta.BitsPerComponent(img.ColorModel()),
|
||||
}
|
||||
f := Frame{Image: img}
|
||||
return &Image{Metadata: &m, Frames: []*Frame{&f}}, nil
|
||||
}
|
||||
ans = &Image{Metadata: md}
|
||||
if md.HasFrames {
|
||||
switch md.Format {
|
||||
case GIF:
|
||||
g, err := gif.DecodeAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans.populate_from_gif(g)
|
||||
case PNG:
|
||||
png, err := apng.DecodeAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans.populate_from_apng(&png)
|
||||
case WEBP:
|
||||
wp, err := webp.DecodeAnimated(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans.populate_from_webp(wp)
|
||||
}
|
||||
ans.Metadata.NumFrames = len(ans.Frames)
|
||||
ans.Metadata.NumPlays = int(ans.LoopCount)
|
||||
} else {
|
||||
var img image.Image
|
||||
switch md.Format {
|
||||
case JPEG:
|
||||
img, err = myjpeg.Decode(r)
|
||||
case PNG:
|
||||
img, err = apng.Decode(r)
|
||||
default:
|
||||
img, _, err = image.Decode(r)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans.Metadata.PixelWidth = uint32(img.Bounds().Dx())
|
||||
ans.Metadata.PixelHeight = uint32(img.Bounds().Dy())
|
||||
ans.Frames = append(ans.Frames, &Frame{Image: img})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func decode_all_magick(inp *types.Input, md *meta.Data, cfg *decodeConfig) (ans *Image, err error) {
|
||||
mi, err := magick.OpenAll(inp, md, cfg.magick_callback)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans = &Image{Metadata: md}
|
||||
for _, f := range mi.Frames {
|
||||
fr := &Frame{
|
||||
Number: uint(f.Number), TopLeft: image.Pt(f.Left, f.Top), Image: f.Img,
|
||||
Delay: time.Millisecond * time.Duration(f.Delay_ms), ComposeOnto: uint(f.Compose_onto),
|
||||
Replace: f.Replace,
|
||||
}
|
||||
ans.Frames = append(ans.Frames, fr)
|
||||
}
|
||||
if md != nil {
|
||||
// in case of transforms/auto-orient
|
||||
b := ans.Bounds()
|
||||
md.PixelWidth, md.PixelHeight = uint32(b.Dx()), uint32(b.Dy())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
return fixOrientation(img, oval), nil
|
||||
func decode_all(inp *types.Input, opts []DecodeOption) (ans *Image, err error) {
|
||||
cfg := NewDecodeConfig(opts...)
|
||||
if !magick.HasMagick() {
|
||||
cfg.backends = slices.DeleteFunc(cfg.backends, func(b Backend) bool { return b == MAGICK_IMAGE })
|
||||
}
|
||||
if len(cfg.backends) == 0 {
|
||||
return nil, fmt.Errorf("the magick command was not found in PATH")
|
||||
}
|
||||
|
||||
if inp.Reader == nil {
|
||||
var f *os.File
|
||||
f, err = mockable_fs.Open(inp.Path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
inp.Reader = f
|
||||
}
|
||||
var md *meta.Data
|
||||
md, inp.Reader, err = autometa.Load(inp.Reader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var backend_err error
|
||||
for _, backend := range cfg.backends {
|
||||
switch backend {
|
||||
case GO_IMAGE:
|
||||
inp.Reader, backend_err = streams.CallbackWithSeekable(inp.Reader, func(r io.Reader) (err error) {
|
||||
ans, err = decode_all_go(r, md, cfg)
|
||||
return
|
||||
})
|
||||
case MAGICK_IMAGE:
|
||||
inp.Reader, backend_err = streams.CallbackWithSeekable(inp.Reader, func(r io.Reader) (err error) {
|
||||
i := *inp
|
||||
i.Reader = r
|
||||
ans, err = decode_all_magick(&i, md, cfg)
|
||||
return
|
||||
})
|
||||
}
|
||||
if backend_err == nil && ans != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if ans == nil && backend_err == nil {
|
||||
backend_err = fmt.Errorf("unrecognised image format")
|
||||
}
|
||||
return ans, backend_err
|
||||
}
|
||||
|
||||
// Decode image from r including all animation frames if it's an animated image.
|
||||
// Returns nil with no error when no supported image is found in r.
|
||||
// Also returns a reader that will yield all bytes from r so that this API does
|
||||
// not exhaust r.
|
||||
func DecodeAll(r io.Reader, opts ...DecodeOption) (ans *Image, s io.Reader, err error) {
|
||||
inp := &types.Input{Reader: r}
|
||||
ans, err = decode_all(inp, opts)
|
||||
s = inp.Reader
|
||||
return
|
||||
}
|
||||
|
||||
func (ans *Image) SingleFrame() image.Image {
|
||||
if ans.DefaultImage != nil {
|
||||
return ans.DefaultImage
|
||||
}
|
||||
return ans.Frames[0].Image
|
||||
|
||||
}
|
||||
|
||||
// Decode reads an image from r.
|
||||
func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
|
||||
ans, _, err := DecodeAll(r, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ans.SingleFrame(), nil
|
||||
}
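
// Illustrative call (a sketch built from the options declared above; r is any
// io.Reader and the halving resize callback is hypothetical):
//
//    img, err := Decode(r, AutoOrientation(true), ColorSpace(SRGB_COLORSPACE),
//        ResizeCallback(func(w, h int) (int, int) { return w / 2, h / 2 }))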
|
||||
|
||||
// Open loads an image from file.
|
||||
@@ -94,20 +510,20 @@ func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
|
||||
//
|
||||
// // Load an image from file.
|
||||
// img, err := imaging.Open("test.jpg")
|
||||
//
|
||||
// // Load an image and transform it depending on the EXIF orientation tag (if present).
|
||||
// img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
|
||||
func Open(filename string, opts ...DecodeOption) (image.Image, error) {
|
||||
file, err := fs.Open(filename)
|
||||
ans, err := OpenAll(filename, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
return Decode(file, opts...)
|
||||
return ans.SingleFrame(), nil
|
||||
}
|
||||
|
||||
func OpenAll(filename string, opts ...DecodeOption) (*Image, error) {
|
||||
return decode_all(&types.Input{Path: filename}, opts)
|
||||
}
|
||||
|
||||
func OpenConfig(filename string) (ans image.Config, format_name string, err error) {
|
||||
file, err := fs.Open(filename)
|
||||
file, err := mockable_fs.Open(filename)
|
||||
if err != nil {
|
||||
return ans, "", err
|
||||
}
|
||||
@@ -115,58 +531,29 @@ func OpenConfig(filename string) (ans image.Config, format_name string, err erro
|
||||
return image.DecodeConfig(file)
|
||||
}
|
||||
|
||||
// Format is an image file format.
|
||||
type Format int
|
||||
type Format = types.Format
|
||||
|
||||
// Image file formats.
|
||||
const (
|
||||
JPEG Format = iota
|
||||
PNG
|
||||
GIF
|
||||
TIFF
|
||||
BMP
|
||||
PBM
|
||||
PGM
|
||||
PPM
|
||||
PAM
|
||||
UNKNOWN = types.UNKNOWN
|
||||
JPEG = types.JPEG
|
||||
PNG = types.PNG
|
||||
GIF = types.GIF
|
||||
TIFF = types.TIFF
|
||||
WEBP = types.WEBP
|
||||
BMP = types.BMP
|
||||
PBM = types.PBM
|
||||
PGM = types.PGM
|
||||
PPM = types.PPM
|
||||
PAM = types.PAM
|
||||
)
|
||||
|
||||
var formatExts = map[string]Format{
|
||||
"jpg": JPEG,
|
||||
"jpeg": JPEG,
|
||||
"png": PNG,
|
||||
"gif": GIF,
|
||||
"tif": TIFF,
|
||||
"tiff": TIFF,
|
||||
"bmp": BMP,
|
||||
"pbm": PBM,
|
||||
"pgm": PGM,
|
||||
"ppm": PPM,
|
||||
"pam": PAM,
|
||||
}
|
||||
|
||||
var formatNames = map[Format]string{
|
||||
JPEG: "JPEG",
|
||||
PNG: "PNG",
|
||||
GIF: "GIF",
|
||||
TIFF: "TIFF",
|
||||
BMP: "BMP",
|
||||
PBM: "PBM",
|
||||
PGM: "PGM",
|
||||
PAM: "PAM",
|
||||
}
|
||||
|
||||
func (f Format) String() string {
|
||||
return formatNames[f]
|
||||
}
|
||||
|
||||
// ErrUnsupportedFormat means the given image format is not supported.
|
||||
var ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
|
||||
|
||||
// FormatFromExtension parses image format from filename extension:
|
||||
// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
|
||||
func FormatFromExtension(ext string) (Format, error) {
|
||||
if f, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok {
|
||||
if f, ok := types.FormatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok {
|
||||
return f, nil
|
||||
}
|
||||
return -1, ErrUnsupportedFormat
|
||||
@@ -247,7 +634,7 @@ func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) e
|
||||
|
||||
switch format {
|
||||
case JPEG:
|
||||
if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() {
|
||||
if nrgba, ok := img.(*image.NRGBA); ok && IsOpaque(nrgba) {
|
||||
rgba := &image.RGBA{
|
||||
Pix: nrgba.Pix,
|
||||
Stride: nrgba.Stride,
|
||||
@@ -294,7 +681,7 @@ func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
file, err := fs.Create(filename)
|
||||
file, err := mockable_fs.Create(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -305,41 +692,3 @@ func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// orientation is an EXIF flag that specifies the transformation
|
||||
// that should be applied to image to display it correctly.
|
||||
type orientation int
|
||||
|
||||
const (
|
||||
orientationUnspecified = 0
|
||||
orientationNormal = 1
|
||||
orientationFlipH = 2
|
||||
orientationRotate180 = 3
|
||||
orientationFlipV = 4
|
||||
orientationTranspose = 5
|
||||
orientationRotate270 = 6
|
||||
orientationTransverse = 7
|
||||
orientationRotate90 = 8
|
||||
)
|
||||
|
||||
// fixOrientation applies a transform to img corresponding to the given orientation flag.
|
||||
func fixOrientation(img image.Image, o orientation) image.Image {
|
||||
switch o {
|
||||
case orientationNormal:
|
||||
case orientationFlipH:
|
||||
img = FlipH(img)
|
||||
case orientationFlipV:
|
||||
img = FlipV(img)
|
||||
case orientationRotate90:
|
||||
img = Rotate90(img)
|
||||
case orientationRotate180:
|
||||
img = Rotate180(img)
|
||||
case orientationRotate270:
|
||||
img = Rotate270(img)
|
||||
case orientationTranspose:
|
||||
img = Transpose(img)
|
||||
case orientationTransverse:
|
||||
img = Transverse(img)
|
||||
}
|
||||
return img
|
||||
}
|
||||
|
||||
245
vendor/github.com/kovidgoyal/imaging/jpeg/huffman.go
generated
vendored
Normal file
@@ -0,0 +1,245 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jpeg
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// maxCodeLength is the maximum (inclusive) number of bits in a Huffman code.
|
||||
const maxCodeLength = 16
|
||||
|
||||
// maxNCodes is the maximum (inclusive) number of codes in a Huffman tree.
|
||||
const maxNCodes = 256
|
||||
|
||||
// lutSize is the log-2 size of the Huffman decoder's look-up table.
|
||||
const lutSize = 8
|
||||
|
||||
// huffman is a Huffman decoder, specified in section C.
|
||||
type huffman struct {
|
||||
// length is the number of codes in the tree.
|
||||
nCodes int32
|
||||
// lut is the look-up table for the next lutSize bits in the bit-stream.
|
||||
// The high 8 bits of the uint16 are the encoded value. The low 8 bits
|
||||
// are 1 plus the code length, or 0 if the value is too large to fit in
|
||||
// lutSize bits.
|
||||
lut [1 << lutSize]uint16
|
||||
// vals are the decoded values, sorted by their encoding.
|
||||
vals [maxNCodes]uint8
|
||||
// minCodes[i] is the minimum code of length i, or -1 if there are no
|
||||
// codes of that length.
|
||||
minCodes [maxCodeLength]int32
|
||||
// maxCodes[i] is the maximum code of length i, or -1 if there are no
|
||||
// codes of that length.
|
||||
maxCodes [maxCodeLength]int32
|
||||
// valsIndices[i] is the index into vals of minCodes[i].
|
||||
valsIndices [maxCodeLength]int32
|
||||
}
|
||||
|
||||
// errShortHuffmanData means that an unexpected EOF occurred while decoding
|
||||
// Huffman data.
|
||||
var errShortHuffmanData = FormatError("short Huffman data")
|
||||
|
||||
// ensureNBits reads bytes from the byte buffer to ensure that d.bits.n is at
|
||||
// least n. For best performance (avoiding function calls inside hot loops),
|
||||
// the caller is the one responsible for first checking that d.bits.n < n.
|
||||
func (d *decoder) ensureNBits(n int32) error {
|
||||
for {
|
||||
c, err := d.readByteStuffedByte()
|
||||
if err != nil {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return errShortHuffmanData
|
||||
}
|
||||
return err
|
||||
}
|
||||
d.bits.a = d.bits.a<<8 | uint32(c)
|
||||
d.bits.n += 8
|
||||
if d.bits.m == 0 {
|
||||
d.bits.m = 1 << 7
|
||||
} else {
|
||||
d.bits.m <<= 8
|
||||
}
|
||||
if d.bits.n >= n {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// receiveExtend is the composition of RECEIVE and EXTEND, specified in section
|
||||
// F.2.2.1.
|
||||
func (d *decoder) receiveExtend(t uint8) (int32, error) {
|
||||
if d.bits.n < int32(t) {
|
||||
if err := d.ensureNBits(int32(t)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
d.bits.n -= int32(t)
|
||||
d.bits.m >>= t
|
||||
s := int32(1) << t
|
||||
x := int32(d.bits.a>>uint8(d.bits.n)) & (s - 1)
|
||||
if x < s>>1 {
|
||||
x += ((-1) << t) + 1
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// processDHT processes a Define Huffman Table marker, and initializes a huffman
|
||||
// struct from its contents. Specified in section B.2.4.2.
|
||||
func (d *decoder) processDHT(n int) error {
|
||||
for n > 0 {
|
||||
if n < 17 {
|
||||
return FormatError("DHT has wrong length")
|
||||
}
|
||||
if err := d.readFull(d.tmp[:17]); err != nil {
|
||||
return err
|
||||
}
|
||||
tc := d.tmp[0] >> 4
|
||||
if tc > maxTc {
|
||||
return FormatError("bad Tc value")
|
||||
}
|
||||
th := d.tmp[0] & 0x0f
|
||||
// The baseline th <= 1 restriction is specified in table B.5.
|
||||
if th > maxTh || (d.baseline && th > 1) {
|
||||
return FormatError("bad Th value")
|
||||
}
|
||||
h := &d.huff[tc][th]
|
||||
|
||||
// Read nCodes and h.vals (and derive h.nCodes).
|
||||
// nCodes[i] is the number of codes with code length i.
|
||||
// h.nCodes is the total number of codes.
|
||||
h.nCodes = 0
|
||||
var nCodes [maxCodeLength]int32
|
||||
for i := range nCodes {
|
||||
nCodes[i] = int32(d.tmp[i+1])
|
||||
h.nCodes += nCodes[i]
|
||||
}
|
||||
if h.nCodes == 0 {
|
||||
return FormatError("Huffman table has zero length")
|
||||
}
|
||||
if h.nCodes > maxNCodes {
|
||||
return FormatError("Huffman table has excessive length")
|
||||
}
|
||||
n -= int(h.nCodes) + 17
|
||||
if n < 0 {
|
||||
return FormatError("DHT has wrong length")
|
||||
}
|
||||
if err := d.readFull(h.vals[:h.nCodes]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Derive the look-up table.
|
||||
clear(h.lut[:])
|
||||
var x, code uint32
|
||||
for i := range uint32(lutSize) {
|
||||
code <<= 1
|
||||
for j := int32(0); j < nCodes[i]; j++ {
|
||||
// The codeLength is 1+i, so shift code by 8-(1+i) to
|
||||
// calculate the high bits for every 8-bit sequence
|
||||
// whose codeLength's high bits matches code.
|
||||
// The high 8 bits of lutValue are the encoded value.
|
||||
// The low 8 bits are 1 plus the codeLength.
|
||||
base := uint8(code << (7 - i))
|
||||
lutValue := uint16(h.vals[x])<<8 | uint16(2+i)
|
||||
for k := uint8(0); k < 1<<(7-i); k++ {
|
||||
h.lut[base|k] = lutValue
|
||||
}
|
||||
code++
|
||||
x++
|
||||
}
|
||||
}
|
||||
|
||||
// Derive minCodes, maxCodes, and valsIndices.
|
||||
var c, index int32
|
||||
for i, n := range nCodes {
|
||||
if n == 0 {
|
||||
h.minCodes[i] = -1
|
||||
h.maxCodes[i] = -1
|
||||
h.valsIndices[i] = -1
|
||||
} else {
|
||||
h.minCodes[i] = c
|
||||
h.maxCodes[i] = c + n - 1
|
||||
h.valsIndices[i] = index
|
||||
c += n
|
||||
index += n
|
||||
}
|
||||
c <<= 1
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeHuffman returns the next Huffman-coded value from the bit-stream,
|
||||
// decoded according to h.
|
||||
func (d *decoder) decodeHuffman(h *huffman) (uint8, error) {
|
||||
if h.nCodes == 0 {
|
||||
return 0, FormatError("uninitialized Huffman table")
|
||||
}
|
||||
|
||||
if d.bits.n < 8 {
|
||||
if err := d.ensureNBits(8); err != nil {
|
||||
if err != errMissingFF00 && err != errShortHuffmanData {
|
||||
return 0, err
|
||||
}
|
||||
// There are no more bytes of data in this segment, but we may still
|
||||
// be able to read the next symbol out of the previously read bits.
|
||||
// First, undo the readByte that the ensureNBits call made.
|
||||
if d.bytes.nUnreadable != 0 {
|
||||
d.unreadByteStuffedByte()
|
||||
}
|
||||
goto slowPath
|
||||
}
|
||||
}
|
||||
if v := h.lut[(d.bits.a>>uint32(d.bits.n-lutSize))&0xff]; v != 0 {
|
||||
n := (v & 0xff) - 1
|
||||
d.bits.n -= int32(n)
|
||||
d.bits.m >>= n
|
||||
return uint8(v >> 8), nil
|
||||
}
|
||||
|
||||
slowPath:
|
||||
for i, code := 0, int32(0); i < maxCodeLength; i++ {
|
||||
if d.bits.n == 0 {
|
||||
if err := d.ensureNBits(1); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if d.bits.a&d.bits.m != 0 {
|
||||
code |= 1
|
||||
}
|
||||
d.bits.n--
|
||||
d.bits.m >>= 1
|
||||
if code <= h.maxCodes[i] {
|
||||
return h.vals[h.valsIndices[i]+code-h.minCodes[i]], nil
|
||||
}
|
||||
code <<= 1
|
||||
}
|
||||
return 0, FormatError("bad Huffman code")
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBit() (bool, error) {
|
||||
if d.bits.n == 0 {
|
||||
if err := d.ensureNBits(1); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
ret := d.bits.a&d.bits.m != 0
|
||||
d.bits.n--
|
||||
d.bits.m >>= 1
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBits(n int32) (uint32, error) {
|
||||
if d.bits.n < n {
|
||||
if err := d.ensureNBits(n); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
ret := d.bits.a >> uint32(d.bits.n-n)
|
||||
ret &= (1 << uint32(n)) - 1
|
||||
d.bits.n -= n
|
||||
d.bits.m >>= uint32(n)
|
||||
return ret, nil
|
||||
}
|
||||
194
vendor/github.com/kovidgoyal/imaging/jpeg/idct.go
generated
vendored
Normal file
@@ -0,0 +1,194 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jpeg
|
||||
|
||||
// This is a Go translation of idct.c from
|
||||
//
|
||||
// http://standards.iso.org/ittf/PubliclyAvailableStandards/ISO_IEC_13818-4_2004_Conformance_Testing/Video/verifier/mpeg2decode_960109.tar.gz
|
||||
//
|
||||
// which carries the following notice:
|
||||
|
||||
/* Copyright (C) 1996, MPEG Software Simulation Group. All Rights Reserved. */
|
||||
|
||||
/*
|
||||
* Disclaimer of Warranty
|
||||
*
|
||||
* These software programs are available to the user without any license fee or
|
||||
* royalty on an "as is" basis. The MPEG Software Simulation Group disclaims
|
||||
* any and all warranties, whether express, implied, or statuary, including any
|
||||
* implied warranties or merchantability or of fitness for a particular
|
||||
* purpose. In no event shall the copyright-holder be liable for any
|
||||
* incidental, punitive, or consequential damages of any kind whatsoever
|
||||
* arising from the use of these programs.
|
||||
*
|
||||
* This disclaimer of warranty extends to the user of these programs and user's
|
||||
* customers, employees, agents, transferees, successors, and assigns.
|
||||
*
|
||||
* The MPEG Software Simulation Group does not represent or warrant that the
|
||||
* programs furnished hereunder are free of infringement of any third-party
|
||||
* patents.
|
||||
*
|
||||
* Commercial implementations of MPEG-1 and MPEG-2 video, including shareware,
|
||||
* are subject to royalty fees to patent holders. Many of these patents are
|
||||
* general enough such that they are unavoidable regardless of implementation
|
||||
* design.
|
||||
*
|
||||
*/
|
||||
|
||||
const blockSize = 64 // A DCT block is 8x8.
|
||||
|
||||
type block [blockSize]int32
|
||||
|
||||
const (
|
||||
w1 = 2841 // 2048*sqrt(2)*cos(1*pi/16)
|
||||
w2 = 2676 // 2048*sqrt(2)*cos(2*pi/16)
|
||||
w3 = 2408 // 2048*sqrt(2)*cos(3*pi/16)
|
||||
w5 = 1609 // 2048*sqrt(2)*cos(5*pi/16)
|
||||
w6 = 1108 // 2048*sqrt(2)*cos(6*pi/16)
|
||||
w7 = 565 // 2048*sqrt(2)*cos(7*pi/16)
|
||||
|
||||
w1pw7 = w1 + w7
|
||||
w1mw7 = w1 - w7
|
||||
w2pw6 = w2 + w6
|
||||
w2mw6 = w2 - w6
|
||||
w3pw5 = w3 + w5
|
||||
w3mw5 = w3 - w5
|
||||
|
||||
r2 = 181 // 256/sqrt(2)
|
||||
)
|
||||
|
||||
// idct performs a 2-D Inverse Discrete Cosine Transformation.
|
||||
//
|
||||
// The input coefficients should already have been multiplied by the
|
||||
// appropriate quantization table. We use fixed-point computation, with the
|
||||
// number of bits for the fractional component varying over the intermediate
|
||||
// stages.
|
||||
//
|
||||
// For more on the actual algorithm, see Z. Wang, "Fast algorithms for the
|
||||
// discrete W transform and for the discrete Fourier transform", IEEE Trans. on
|
||||
// ASSP, Vol. ASSP- 32, pp. 803-816, Aug. 1984.
|
||||
func idct(src *block) {
|
||||
// Horizontal 1-D IDCT.
|
||||
for y := range 8 {
|
||||
y8 := y * 8
|
||||
s := src[y8 : y8+8 : y8+8] // Small cap improves performance, see https://golang.org/issue/27857
|
||||
// If all the AC components are zero, then the IDCT is trivial.
|
||||
if s[1] == 0 && s[2] == 0 && s[3] == 0 &&
|
||||
s[4] == 0 && s[5] == 0 && s[6] == 0 && s[7] == 0 {
|
||||
dc := s[0] << 3
|
||||
s[0] = dc
|
||||
s[1] = dc
|
||||
s[2] = dc
|
||||
s[3] = dc
|
||||
s[4] = dc
|
||||
s[5] = dc
|
||||
s[6] = dc
|
||||
s[7] = dc
|
||||
continue
|
||||
}
|
||||
|
||||
// Prescale.
|
||||
x0 := (s[0] << 11) + 128
|
||||
x1 := s[4] << 11
|
||||
x2 := s[6]
|
||||
x3 := s[2]
|
||||
x4 := s[1]
|
||||
x5 := s[7]
|
||||
x6 := s[5]
|
||||
x7 := s[3]
|
||||
|
||||
// Stage 1.
|
||||
x8 := w7 * (x4 + x5)
|
||||
x4 = x8 + w1mw7*x4
|
||||
x5 = x8 - w1pw7*x5
|
||||
x8 = w3 * (x6 + x7)
|
||||
x6 = x8 - w3mw5*x6
|
||||
x7 = x8 - w3pw5*x7
|
||||
|
||||
// Stage 2.
|
||||
x8 = x0 + x1
|
||||
x0 -= x1
|
||||
x1 = w6 * (x3 + x2)
|
||||
x2 = x1 - w2pw6*x2
|
||||
x3 = x1 + w2mw6*x3
|
||||
x1 = x4 + x6
|
||||
x4 -= x6
|
||||
x6 = x5 + x7
|
||||
x5 -= x7
|
||||
|
||||
// Stage 3.
|
||||
x7 = x8 + x3
|
||||
x8 -= x3
|
||||
x3 = x0 + x2
|
||||
x0 -= x2
|
||||
x2 = (r2*(x4+x5) + 128) >> 8
|
||||
x4 = (r2*(x4-x5) + 128) >> 8
|
||||
|
||||
// Stage 4.
|
||||
s[0] = (x7 + x1) >> 8
|
||||
s[1] = (x3 + x2) >> 8
|
||||
s[2] = (x0 + x4) >> 8
|
||||
s[3] = (x8 + x6) >> 8
|
||||
s[4] = (x8 - x6) >> 8
|
||||
s[5] = (x0 - x4) >> 8
|
||||
s[6] = (x3 - x2) >> 8
|
||||
s[7] = (x7 - x1) >> 8
|
||||
}
|
||||
|
||||
// Vertical 1-D IDCT.
|
||||
for x := range 8 {
|
||||
// Similar to the horizontal 1-D IDCT case, if all the AC components are zero, then the IDCT is trivial.
|
||||
// However, after performing the horizontal 1-D IDCT, there are typically non-zero AC components, so
|
||||
// we do not bother to check for the all-zero case.
|
||||
s := src[x : x+57 : x+57] // Small cap improves performance, see https://golang.org/issue/27857
|
||||
|
||||
// Prescale.
|
||||
y0 := (s[8*0] << 8) + 8192
|
||||
y1 := s[8*4] << 8
|
||||
y2 := s[8*6]
|
||||
y3 := s[8*2]
|
||||
y4 := s[8*1]
|
||||
y5 := s[8*7]
|
||||
y6 := s[8*5]
|
||||
y7 := s[8*3]
|
||||
|
||||
// Stage 1.
|
||||
y8 := w7*(y4+y5) + 4
|
||||
y4 = (y8 + w1mw7*y4) >> 3
|
||||
y5 = (y8 - w1pw7*y5) >> 3
|
||||
y8 = w3*(y6+y7) + 4
|
||||
y6 = (y8 - w3mw5*y6) >> 3
|
||||
y7 = (y8 - w3pw5*y7) >> 3
|
||||
|
||||
// Stage 2.
|
||||
y8 = y0 + y1
|
||||
y0 -= y1
|
||||
y1 = w6*(y3+y2) + 4
|
||||
y2 = (y1 - w2pw6*y2) >> 3
|
||||
y3 = (y1 + w2mw6*y3) >> 3
|
||||
y1 = y4 + y6
|
||||
y4 -= y6
|
||||
y6 = y5 + y7
|
||||
y5 -= y7
|
||||
|
||||
// Stage 3.
|
||||
y7 = y8 + y3
|
||||
y8 -= y3
|
||||
y3 = y0 + y2
|
||||
y0 -= y2
|
||||
y2 = (r2*(y4+y5) + 128) >> 8
|
||||
y4 = (r2*(y4-y5) + 128) >> 8
|
||||
|
||||
// Stage 4.
|
||||
s[8*0] = (y7 + y1) >> 14
|
||||
s[8*1] = (y3 + y2) >> 14
|
||||
s[8*2] = (y0 + y4) >> 14
|
||||
s[8*3] = (y8 + y6) >> 14
|
||||
s[8*4] = (y8 - y6) >> 14
|
||||
s[8*5] = (y0 - y4) >> 14
|
||||
s[8*6] = (y3 - y2) >> 14
|
||||
s[8*7] = (y7 - y1) >> 14
|
||||
}
|
||||
}
|
||||
814
vendor/github.com/kovidgoyal/imaging/jpeg/reader.go
generated
vendored
Normal file
@@ -0,0 +1,814 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package jpeg implements a JPEG image decoder and encoder.
|
||||
//
|
||||
// JPEG is defined in ITU-T T.81: https://www.w3.org/Graphics/JPEG/itu-t81.pdf.
|
||||
package jpeg
|
||||
|
||||
import (
|
||||
"image"
|
||||
"image/color"
|
||||
"io"
|
||||
|
||||
"github.com/kovidgoyal/go-parallel"
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/nrgba"
|
||||
)
|
||||
|
||||
// A FormatError reports that the input is not a valid JPEG.
|
||||
type FormatError string
|
||||
|
||||
func (e FormatError) Error() string { return "invalid JPEG format: " + string(e) }
|
||||
|
||||
// An UnsupportedError reports that the input uses a valid but unimplemented JPEG feature.
|
||||
type UnsupportedError string
|
||||
|
||||
func (e UnsupportedError) Error() string { return "unsupported JPEG feature: " + string(e) }
|
||||
|
||||
var errUnsupportedSubsamplingRatio = UnsupportedError("luma/chroma subsampling ratio")
|
||||
|
||||
// Component specification, specified in section B.2.2.
|
||||
type component struct {
|
||||
h int // Horizontal sampling factor.
|
||||
v int // Vertical sampling factor.
|
||||
c uint8 // Component identifier.
|
||||
tq uint8 // Quantization table destination selector.
|
||||
expand struct{ h, v int } // subsampleRatio for this component
|
||||
}
|
||||
|
||||
const (
|
||||
dcTable = 0
|
||||
acTable = 1
|
||||
maxTc = 1
|
||||
maxTh = 3
|
||||
maxTq = 3
|
||||
|
||||
maxComponents = 4
|
||||
)
|
||||
|
||||
const (
|
||||
sof0Marker = 0xc0 // Start Of Frame (Baseline Sequential).
|
||||
sof1Marker = 0xc1 // Start Of Frame (Extended Sequential).
|
||||
sof2Marker = 0xc2 // Start Of Frame (Progressive).
|
||||
dhtMarker = 0xc4 // Define Huffman Table.
|
||||
rst0Marker = 0xd0 // ReSTart (0).
|
||||
rst7Marker = 0xd7 // ReSTart (7).
|
||||
soiMarker = 0xd8 // Start Of Image.
|
||||
eoiMarker = 0xd9 // End Of Image.
|
||||
sosMarker = 0xda // Start Of Scan.
|
||||
dqtMarker = 0xdb // Define Quantization Table.
|
||||
driMarker = 0xdd // Define Restart Interval.
|
||||
comMarker = 0xfe // COMment.
|
||||
// "APPlication specific" markers aren't part of the JPEG spec per se,
|
||||
// but in practice, their use is described at
|
||||
// https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html
|
||||
app0Marker = 0xe0
|
||||
app14Marker = 0xee
|
||||
app15Marker = 0xef
|
||||
)
|
||||
|
||||
// See https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
|
||||
const (
|
||||
adobeTransformUnknown = 0
|
||||
adobeTransformYCbCr = 1
|
||||
adobeTransformYCbCrK = 2
|
||||
)
|
||||
|
||||
// unzig maps from the zig-zag ordering to the natural ordering. For example,
|
||||
// unzig[3] is the column and row of the fourth element in zig-zag order. The
|
||||
// value is 16, which means first column (16%8 == 0) and third row (16/8 == 2).
|
||||
var unzig = [blockSize]int{
|
||||
0, 1, 8, 16, 9, 2, 3, 10,
|
||||
17, 24, 32, 25, 18, 11, 4, 5,
|
||||
12, 19, 26, 33, 40, 48, 41, 34,
|
||||
27, 20, 13, 6, 7, 14, 21, 28,
|
||||
35, 42, 49, 56, 57, 50, 43, 36,
|
||||
29, 22, 15, 23, 30, 37, 44, 51,
|
||||
58, 59, 52, 45, 38, 31, 39, 46,
|
||||
53, 60, 61, 54, 47, 55, 62, 63,
|
||||
}
|
||||
|
||||
// Deprecated: Reader is not used by the [image/jpeg] package and should
|
||||
// not be used by others. It is kept for compatibility.
|
||||
type Reader interface {
|
||||
io.ByteReader
|
||||
io.Reader
|
||||
}
|
||||
|
||||
// bits holds the unprocessed bits that have been taken from the byte-stream.
|
||||
// The n least significant bits of a form the unread bits, to be read in MSB to
|
||||
// LSB order.
|
||||
type bits struct {
|
||||
a uint32 // accumulator.
|
||||
m uint32 // mask. m==1<<(n-1) when n>0, with m==0 when n==0.
|
||||
n int32 // the number of unread bits in a.
|
||||
}
|
||||
|
||||
type decoder struct {
|
||||
r io.Reader
|
||||
bits bits
|
||||
// bytes is a byte buffer, similar to a bufio.Reader, except that it
|
||||
// has to be able to unread more than 1 byte, due to byte stuffing.
|
||||
// Byte stuffing is specified in section F.1.2.3.
|
||||
bytes struct {
|
||||
// buf[i:j] are the buffered bytes read from the underlying
|
||||
// io.Reader that haven't yet been passed further on.
|
||||
buf [4096]byte
|
||||
i, j int
|
||||
// nUnreadable is the number of bytes to back up i after
|
||||
// overshooting. It can be 0, 1 or 2.
|
||||
nUnreadable int
|
||||
}
|
||||
width, height int
|
||||
|
||||
img1 *image.Gray
|
||||
img3 *image.YCbCr
|
||||
blackPix []byte
|
||||
flex bool // uses a non-standard subsampleRatio
|
||||
force_flex bool // used for testing
|
||||
maxH, maxV int
|
||||
blackStride int
|
||||
|
||||
ri int // Restart Interval.
|
||||
nComp int
|
||||
|
||||
// As per section 4.5, there are four modes of operation (selected by the
|
||||
// SOF? markers): sequential DCT, progressive DCT, lossless and
|
||||
// hierarchical, although this implementation does not support the latter
|
||||
// two non-DCT modes. Sequential DCT is further split into baseline and
|
||||
// extended, as per section 4.11.
|
||||
baseline bool
|
||||
progressive bool
|
||||
|
||||
jfif bool
|
||||
adobeTransformValid bool
|
||||
adobeTransform uint8
|
||||
eobRun uint16 // End-of-Band run, specified in section G.1.2.2.
|
||||
|
||||
comp [maxComponents]component
|
||||
progCoeffs [maxComponents][]block // Saved state between progressive-mode scans.
|
||||
huff [maxTc + 1][maxTh + 1]huffman
|
||||
quant [maxTq + 1]block // Quantization tables, in zig-zag order.
|
||||
tmp [2 * blockSize]byte
|
||||
}
|
||||
|
||||
// fill fills up the d.bytes.buf buffer from the underlying io.Reader. It
|
||||
// should only be called when there are no unread bytes in d.bytes.
|
||||
func (d *decoder) fill() error {
|
||||
if d.bytes.i != d.bytes.j {
|
||||
panic("jpeg: fill called when unread bytes exist")
|
||||
}
|
||||
// Move the last 2 bytes to the start of the buffer, in case we need
|
||||
// to call unreadByteStuffedByte.
|
||||
if d.bytes.j > 2 {
|
||||
d.bytes.buf[0] = d.bytes.buf[d.bytes.j-2]
|
||||
d.bytes.buf[1] = d.bytes.buf[d.bytes.j-1]
|
||||
d.bytes.i, d.bytes.j = 2, 2
|
||||
}
|
||||
// Fill in the rest of the buffer.
|
||||
n, err := d.r.Read(d.bytes.buf[d.bytes.j:])
|
||||
d.bytes.j += n
|
||||
if n > 0 {
|
||||
return nil
|
||||
}
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// unreadByteStuffedByte undoes the most recent readByteStuffedByte call,
|
||||
// giving a byte of data back from d.bits to d.bytes. The Huffman look-up table
|
||||
// requires at least 8 bits for look-up, which means that Huffman decoding can
|
||||
// sometimes overshoot and read one or two too many bytes. Two-byte overshoot
|
||||
// can happen when expecting to read a 0xff 0x00 byte-stuffed byte.
|
||||
func (d *decoder) unreadByteStuffedByte() {
|
||||
d.bytes.i -= d.bytes.nUnreadable
|
||||
d.bytes.nUnreadable = 0
|
||||
if d.bits.n >= 8 {
|
||||
d.bits.a >>= 8
|
||||
d.bits.n -= 8
|
||||
d.bits.m >>= 8
|
||||
}
|
||||
}
|
||||
|
||||
// readByte returns the next byte, whether buffered or not buffered. It does
|
||||
// not care about byte stuffing.
|
||||
func (d *decoder) readByte() (x byte, err error) {
|
||||
for d.bytes.i == d.bytes.j {
|
||||
if err = d.fill(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
x = d.bytes.buf[d.bytes.i]
|
||||
d.bytes.i++
|
||||
d.bytes.nUnreadable = 0
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// errMissingFF00 means that readByteStuffedByte encountered an 0xff byte (a
|
||||
// marker byte) that wasn't the expected byte-stuffed sequence 0xff, 0x00.
|
||||
var errMissingFF00 = FormatError("missing 0xff00 sequence")
|
||||
|
||||
// readByteStuffedByte is like readByte but is for byte-stuffed Huffman data.
|
||||
func (d *decoder) readByteStuffedByte() (x byte, err error) {
|
||||
// Take the fast path if d.bytes.buf contains at least two bytes.
|
||||
if d.bytes.i+2 <= d.bytes.j {
|
||||
x = d.bytes.buf[d.bytes.i]
|
||||
d.bytes.i++
|
||||
d.bytes.nUnreadable = 1
|
||||
if x != 0xff {
|
||||
return x, err
|
||||
}
|
||||
if d.bytes.buf[d.bytes.i] != 0x00 {
|
||||
return 0, errMissingFF00
|
||||
}
|
||||
d.bytes.i++
|
||||
d.bytes.nUnreadable = 2
|
||||
return 0xff, nil
|
||||
}
|
||||
|
||||
d.bytes.nUnreadable = 0
|
||||
|
||||
x, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d.bytes.nUnreadable = 1
|
||||
if x != 0xff {
|
||||
return x, nil
|
||||
}
|
||||
|
||||
x, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d.bytes.nUnreadable = 2
|
||||
if x != 0x00 {
|
||||
return 0, errMissingFF00
|
||||
}
|
||||
return 0xff, nil
|
||||
}
|
||||
|
||||
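// --- Illustrative sketch (not part of the vendored file) ---
// In the entropy-coded data every literal 0xff byte is followed by a stuffed
// 0x00 so that it cannot be mistaken for a marker; readByteStuffedByte above
// undoes that one byte at a time. The standalone helper below applies the
// same rule to a whole buffer (needs "fmt" from the standard library):
func deStuff(src []byte) ([]byte, error) {
	out := make([]byte, 0, len(src))
	for i := 0; i < len(src); i++ {
		b := src[i]
		if b != 0xff {
			out = append(out, b)
			continue
		}
		if i+1 < len(src) && src[i+1] == 0x00 {
			out = append(out, 0xff) // keep the data byte
			i++                     // and skip the stuffed zero
			continue
		}
		return nil, fmt.Errorf("missing 0xff00 sequence at offset %d", i)
	}
	return out, nil
}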
// readFull reads exactly len(p) bytes into p. It does not care about byte
|
||||
// stuffing.
|
||||
func (d *decoder) readFull(p []byte) error {
|
||||
// Unread the overshot bytes, if any.
|
||||
if d.bytes.nUnreadable != 0 {
|
||||
if d.bits.n >= 8 {
|
||||
d.unreadByteStuffedByte()
|
||||
}
|
||||
d.bytes.nUnreadable = 0
|
||||
}
|
||||
|
||||
for {
|
||||
n := copy(p, d.bytes.buf[d.bytes.i:d.bytes.j])
|
||||
p = p[n:]
|
||||
d.bytes.i += n
|
||||
if len(p) == 0 {
|
||||
break
|
||||
}
|
||||
if err := d.fill(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ignore ignores the next n bytes.
|
||||
func (d *decoder) ignore(n int) error {
|
||||
// Unread the overshot bytes, if any.
|
||||
if d.bytes.nUnreadable != 0 {
|
||||
if d.bits.n >= 8 {
|
||||
d.unreadByteStuffedByte()
|
||||
}
|
||||
d.bytes.nUnreadable = 0
|
||||
}
|
||||
|
||||
for {
|
||||
m := min(d.bytes.j-d.bytes.i, n)
|
||||
d.bytes.i += m
|
||||
n -= m
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
if err := d.fill(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Specified in section B.2.2.
|
||||
func (d *decoder) processSOF(n int) error {
|
||||
if d.nComp != 0 {
|
||||
return FormatError("multiple SOF markers")
|
||||
}
|
||||
switch n {
|
||||
case 6 + 3*1: // Grayscale image.
|
||||
d.nComp = 1
|
||||
case 6 + 3*3: // YCbCr or RGB image.
|
||||
d.nComp = 3
|
||||
case 6 + 3*4: // YCbCrK or CMYK image.
|
||||
d.nComp = 4
|
||||
default:
|
||||
return UnsupportedError("number of components")
|
||||
}
|
||||
if err := d.readFull(d.tmp[:n]); err != nil {
|
||||
return err
|
||||
}
|
||||
// We only support 8-bit precision.
|
||||
if d.tmp[0] != 8 {
|
||||
return UnsupportedError("precision")
|
||||
}
|
||||
d.height = int(d.tmp[1])<<8 + int(d.tmp[2])
|
||||
d.width = int(d.tmp[3])<<8 + int(d.tmp[4])
|
||||
if int(d.tmp[5]) != d.nComp {
|
||||
return FormatError("SOF has wrong length")
|
||||
}
|
||||
|
||||
for i := 0; i < d.nComp; i++ {
|
||||
d.comp[i].c = d.tmp[6+3*i]
|
||||
// Section B.2.2 states that "the value of C_i shall be different from
|
||||
// the values of C_1 through C_(i-1)".
|
||||
for j := 0; j < i; j++ {
|
||||
if d.comp[i].c == d.comp[j].c {
|
||||
return FormatError("repeated component identifier")
|
||||
}
|
||||
}
|
||||
|
||||
d.comp[i].tq = d.tmp[8+3*i]
|
||||
if d.comp[i].tq > maxTq {
|
||||
return FormatError("bad Tq value")
|
||||
}
|
||||
|
||||
hv := d.tmp[7+3*i]
|
||||
h, v := int(hv>>4), int(hv&0x0f)
|
||||
if h < 1 || 4 < h || v < 1 || 4 < v {
|
||||
return FormatError("luma/chroma subsampling ratio")
|
||||
}
|
||||
if h == 3 || v == 3 {
|
||||
return errUnsupportedSubsamplingRatio
|
||||
}
|
||||
d.maxH, d.maxV = max(d.maxH, h), max(d.maxV, v)
|
||||
switch d.nComp {
|
||||
case 1:
|
||||
// If a JPEG image has only one component, section A.2 says "this data
|
||||
// is non-interleaved by definition" and section A.2.2 says "[in this
|
||||
// case...] the order of data units within a scan shall be left-to-right
|
||||
// and top-to-bottom... regardless of the values of H_1 and V_1". Section
|
||||
// 4.8.2 also says "[for non-interleaved data], the MCU is defined to be
|
||||
// one data unit". Similarly, section A.1.1 explains that it is the ratio
|
||||
// of H_i to max_j(H_j) that matters, and similarly for V. For grayscale
|
||||
// images, H_1 is the maximum H_j for all components j, so that ratio is
|
||||
// always 1. The component's (h, v) is effectively always (1, 1): even if
|
||||
// the nominal (h, v) is (2, 1), a 20x5 image is encoded in three 8x8
|
||||
// MCUs, not two 16x8 MCUs.
|
||||
h, v = 1, 1
|
||||
|
||||
case 3:
|
||||
if i == 0 && v == 4 {
|
||||
return errUnsupportedSubsamplingRatio
|
||||
}
|
||||
case 4:
|
||||
// For 4-component images (either CMYK or YCbCrK), we only support two
|
||||
// hv vectors: [0x11 0x11 0x11 0x11] and [0x22 0x11 0x11 0x22].
|
||||
// Theoretically, 4-component JPEG images could mix and match hv values
|
||||
// but in practice, those two combinations are the only ones in use,
|
||||
// and it simplifies the applyBlack code below if we can assume that:
|
||||
// - for CMYK, the C and K channels have full samples, and if the M
|
||||
// and Y channels subsample, they subsample both horizontally and
|
||||
// vertically.
|
||||
// - for YCbCrK, the Y and K channels have full samples.
|
||||
switch i {
|
||||
case 0:
|
||||
if hv != 0x11 && hv != 0x22 {
|
||||
return errUnsupportedSubsamplingRatio
|
||||
}
|
||||
case 1, 2:
|
||||
if hv != 0x11 {
|
||||
return errUnsupportedSubsamplingRatio
|
||||
}
|
||||
case 3:
|
||||
if d.comp[0].h != h || d.comp[0].v != v {
|
||||
return errUnsupportedSubsamplingRatio
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d.comp[i].h = h
|
||||
d.comp[i].v = v
|
||||
}
|
||||
if d.nComp == 3 {
|
||||
for i := range 3 {
|
||||
if d.maxH%d.comp[i].h != 0 || d.maxV%d.comp[i].v != 0 {
|
||||
return errUnsupportedSubsamplingRatio
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range d.nComp {
|
||||
d.comp[i].expand.h = d.maxH / d.comp[i].h
|
||||
d.comp[i].expand.v = d.maxV / d.comp[i].v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Specified in section B.2.4.1.
|
||||
func (d *decoder) processDQT(n int) error {
|
||||
loop:
|
||||
for n > 0 {
|
||||
n--
|
||||
x, err := d.readByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tq := x & 0x0f
|
||||
if tq > maxTq {
|
||||
return FormatError("bad Tq value")
|
||||
}
|
||||
switch x >> 4 {
|
||||
default:
|
||||
return FormatError("bad Pq value")
|
||||
case 0:
|
||||
if n < blockSize {
|
||||
break loop
|
||||
}
|
||||
n -= blockSize
|
||||
if err := d.readFull(d.tmp[:blockSize]); err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range d.quant[tq] {
|
||||
d.quant[tq][i] = int32(d.tmp[i])
|
||||
}
|
||||
case 1:
|
||||
if n < 2*blockSize {
|
||||
break loop
|
||||
}
|
||||
n -= 2 * blockSize
|
||||
if err := d.readFull(d.tmp[:2*blockSize]); err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range d.quant[tq] {
|
||||
d.quant[tq][i] = int32(d.tmp[2*i])<<8 | int32(d.tmp[2*i+1])
|
||||
}
|
||||
}
|
||||
}
|
||||
if n != 0 {
|
||||
return FormatError("DQT has wrong length")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Specified in section B.2.4.4.
|
||||
func (d *decoder) processDRI(n int) error {
|
||||
if n != 2 {
|
||||
return FormatError("DRI has wrong length")
|
||||
}
|
||||
if err := d.readFull(d.tmp[:2]); err != nil {
|
||||
return err
|
||||
}
|
||||
d.ri = int(d.tmp[0])<<8 + int(d.tmp[1])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) processApp0Marker(n int) error {
|
||||
if n < 5 {
|
||||
return d.ignore(n)
|
||||
}
|
||||
if err := d.readFull(d.tmp[:5]); err != nil {
|
||||
return err
|
||||
}
|
||||
n -= 5
|
||||
|
||||
d.jfif = d.tmp[0] == 'J' && d.tmp[1] == 'F' && d.tmp[2] == 'I' && d.tmp[3] == 'F' && d.tmp[4] == '\x00'
|
||||
|
||||
if n > 0 {
|
||||
return d.ignore(n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) processApp14Marker(n int) error {
|
||||
if n < 12 {
|
||||
return d.ignore(n)
|
||||
}
|
||||
if err := d.readFull(d.tmp[:12]); err != nil {
|
||||
return err
|
||||
}
|
||||
n -= 12
|
||||
|
||||
if d.tmp[0] == 'A' && d.tmp[1] == 'd' && d.tmp[2] == 'o' && d.tmp[3] == 'b' && d.tmp[4] == 'e' {
|
||||
d.adobeTransformValid = true
|
||||
d.adobeTransform = d.tmp[11]
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
return d.ignore(n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decode reads a JPEG image from r and returns it as an image.Image.
|
||||
func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) {
|
||||
d.r = r
|
||||
|
||||
// Check for the Start Of Image marker.
|
||||
if err := d.readFull(d.tmp[:2]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if d.tmp[0] != 0xff || d.tmp[1] != soiMarker {
|
||||
return nil, FormatError("missing SOI marker")
|
||||
}
|
||||
|
||||
// Process the remaining segments until the End Of Image marker.
|
||||
for {
|
||||
err := d.readFull(d.tmp[:2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for d.tmp[0] != 0xff {
|
||||
// Strictly speaking, this is a format error. However, libjpeg is
|
||||
// liberal in what it accepts. As of version 9, next_marker in
|
||||
// jdmarker.c treats this as a warning (JWRN_EXTRANEOUS_DATA) and
|
||||
// continues to decode the stream. Even before next_marker sees
|
||||
// extraneous data, jpeg_fill_bit_buffer in jdhuff.c reads as many
|
||||
// bytes as it can, possibly past the end of a scan's data. It
|
||||
// effectively puts back any markers that it overscanned (e.g. an
|
||||
// "\xff\xd9" EOI marker), but it does not put back non-marker data,
|
||||
// and thus it can silently ignore a small number of extraneous
|
||||
// non-marker bytes before next_marker has a chance to see them (and
|
||||
// print a warning).
|
||||
//
|
||||
// We are therefore also liberal in what we accept. Extraneous data
|
||||
// is silently ignored.
|
||||
//
|
||||
// This is similar to, but not exactly the same as, the restart
|
||||
// mechanism within a scan (the RST[0-7] markers).
|
||||
//
|
||||
// Note that extraneous 0xff bytes in e.g. SOS data are escaped as
|
||||
// "\xff\x00", and so are detected a little further down below.
|
||||
d.tmp[0] = d.tmp[1]
|
||||
d.tmp[1], err = d.readByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
marker := d.tmp[1]
|
||||
if marker == 0 {
|
||||
// Treat "\xff\x00" as extraneous data.
|
||||
continue
|
||||
}
|
||||
for marker == 0xff {
|
||||
// Section B.1.1.2 says, "Any marker may optionally be preceded by any
|
||||
// number of fill bytes, which are bytes assigned code X'FF'".
|
||||
marker, err = d.readByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if marker == eoiMarker { // End Of Image.
|
||||
break
|
||||
}
|
||||
if rst0Marker <= marker && marker <= rst7Marker {
|
||||
// Figures B.2 and B.16 of the specification suggest that restart markers should
|
||||
// only occur between Entropy Coded Segments and not after the final ECS.
|
||||
// However, some encoders may generate incorrect JPEGs with a final restart
|
||||
// marker. That restart marker will be seen here instead of inside the processSOS
|
||||
// method, and is ignored as a harmless error. Restart markers have no extra data,
|
||||
// so we check for this before we read the 16-bit length of the segment.
|
||||
continue
|
||||
}
|
||||
|
||||
// Read the 16-bit length of the segment. The value includes the 2 bytes for the
|
||||
// length itself, so we subtract 2 to get the number of remaining bytes.
|
||||
if err = d.readFull(d.tmp[:2]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n := int(d.tmp[0])<<8 + int(d.tmp[1]) - 2
|
||||
if n < 0 {
|
||||
return nil, FormatError("short segment length")
|
||||
}
|
||||
|
||||
switch marker {
|
||||
case sof0Marker, sof1Marker, sof2Marker:
|
||||
d.baseline = marker == sof0Marker
|
||||
d.progressive = marker == sof2Marker
|
||||
err = d.processSOF(n)
|
||||
if configOnly && d.jfif {
|
||||
return nil, err
|
||||
}
|
||||
case dhtMarker:
|
||||
if configOnly {
|
||||
err = d.ignore(n)
|
||||
} else {
|
||||
err = d.processDHT(n)
|
||||
}
|
||||
case dqtMarker:
|
||||
if configOnly {
|
||||
err = d.ignore(n)
|
||||
} else {
|
||||
err = d.processDQT(n)
|
||||
}
|
||||
case sosMarker:
|
||||
if configOnly {
|
||||
return nil, nil
|
||||
}
|
||||
err = d.processSOS(n)
|
||||
case driMarker:
|
||||
if configOnly {
|
||||
err = d.ignore(n)
|
||||
} else {
|
||||
err = d.processDRI(n)
|
||||
}
|
||||
case app0Marker:
|
||||
err = d.processApp0Marker(n)
|
||||
case app14Marker:
|
||||
err = d.processApp14Marker(n)
|
||||
default:
|
||||
if app0Marker <= marker && marker <= app15Marker || marker == comMarker {
|
||||
err = d.ignore(n)
|
||||
} else if marker < 0xc0 { // See Table B.1 "Marker code assignments".
|
||||
err = FormatError("unknown marker")
|
||||
} else {
|
||||
err = UnsupportedError("unknown marker")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if d.progressive {
|
||||
if err := d.reconstructProgressiveImage(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if d.img1 != nil {
|
||||
return d.img1, nil
|
||||
}
|
||||
if d.img3 != nil {
|
||||
if d.blackPix != nil {
|
||||
return d.applyBlack()
|
||||
} else if d.isRGB() {
|
||||
return d.convertToRGB()
|
||||
}
|
||||
return d.img3, nil
|
||||
}
|
||||
return nil, FormatError("missing SOS marker")
|
||||
}
|
||||
|
||||
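// --- Illustrative sketch (not part of the vendored file) ---
// Apart from stand-alone markers (SOI, EOI, RST0-7), every marker segment in
// the decode loop above carries a big-endian 16-bit length that includes the
// two length bytes themselves, which is why decode subtracts 2. A standalone
// helper for a buffer positioned at such a segment (needs "errors"):
func parseSegmentHeader(b []byte) (marker byte, payloadLen int, err error) {
	if len(b) < 4 || b[0] != 0xff {
		return 0, 0, errors.New("not positioned at a marker segment")
	}
	marker = b[1]
	payloadLen = int(b[2])<<8 + int(b[3]) - 2 // length counts its own 2 bytes
	if payloadLen < 0 {
		return 0, 0, errors.New("short segment length")
	}
	return marker, payloadLen, nil
}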
// applyBlack combines d.img3 and d.blackPix into a CMYK image. The formula
|
||||
// used depends on whether the JPEG image is stored as CMYK or YCbCrK,
|
||||
// indicated by the APP14 (Adobe) metadata.
|
||||
//
|
||||
// Adobe CMYK JPEG images are inverted, where 255 means no ink instead of full
|
||||
// ink, so we apply "v = 255 - v" at various points. Note that a double
|
||||
// inversion is a no-op, so inversions might be implicit in the code below.
|
||||
func (d *decoder) applyBlack() (image.Image, error) {
|
||||
if !d.adobeTransformValid {
|
||||
return nil, UnsupportedError("unknown color model: 4-component JPEG doesn't have Adobe APP14 metadata")
|
||||
}
|
||||
|
||||
// If the 4-component JPEG image isn't explicitly marked as "Unknown (RGB
|
||||
// or CMYK)" as per
|
||||
// https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
|
||||
// we assume that it is YCbCrK. This matches libjpeg's jdapimin.c.
|
||||
if d.adobeTransform != adobeTransformUnknown {
|
||||
// Convert the YCbCr part of the YCbCrK to RGB, invert the RGB to get
|
||||
// CMY, and patch in the original K. The RGB to CMY inversion cancels
|
||||
// out the 'Adobe inversion' described in the applyBlack doc comment
|
||||
// above, so in practice, only the fourth channel (black) is inverted.
|
||||
bounds := d.img3.Bounds()
|
||||
img := image.NewRGBA(bounds)
|
||||
src := nrgba.NewNRGBAScanner(d.img3)
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
size := w * 4
|
||||
if err := parallel.Run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
i := y * img.Stride
|
||||
src.Scan(0, y, w, y+1, img.Pix[i:i+size])
|
||||
}
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
for iBase, y := 0, bounds.Min.Y; y < bounds.Max.Y; iBase, y = iBase+img.Stride, y+1 {
|
||||
for i, x := iBase+3, bounds.Min.X; x < bounds.Max.X; i, x = i+4, x+1 {
|
||||
img.Pix[i] = 255 - d.blackPix[(y-bounds.Min.Y)*d.blackStride+(x-bounds.Min.X)]
|
||||
}
|
||||
}
|
||||
return &image.CMYK{
|
||||
Pix: img.Pix,
|
||||
Stride: img.Stride,
|
||||
Rect: img.Rect,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// The first three channels (cyan, magenta, yellow) of the CMYK
|
||||
// were decoded into d.img3, but each channel was decoded into a separate
|
||||
// []byte slice, and some channels may be subsampled. We interleave the
|
||||
// separate channels into an image.CMYK's single []byte slice containing 4
|
||||
// contiguous bytes per pixel.
|
||||
bounds := d.img3.Bounds()
|
||||
img := image.NewCMYK(bounds)
|
||||
|
||||
translations := [4]struct {
|
||||
src []byte
|
||||
stride int
|
||||
}{
|
||||
{d.img3.Y, d.img3.YStride},
|
||||
{d.img3.Cb, d.img3.CStride},
|
||||
{d.img3.Cr, d.img3.CStride},
|
||||
{d.blackPix, d.blackStride},
|
||||
}
|
||||
for t, translation := range translations {
|
||||
subsample := d.comp[t].h != d.comp[0].h || d.comp[t].v != d.comp[0].v
|
||||
for iBase, y := 0, bounds.Min.Y; y < bounds.Max.Y; iBase, y = iBase+img.Stride, y+1 {
|
||||
sy := y - bounds.Min.Y
|
||||
if subsample {
|
||||
sy /= 2
|
||||
}
|
||||
for i, x := iBase+t, bounds.Min.X; x < bounds.Max.X; i, x = i+4, x+1 {
|
||||
sx := x - bounds.Min.X
|
||||
if subsample {
|
||||
sx /= 2
|
||||
}
|
||||
img.Pix[i] = 255 - translation.src[sy*translation.stride+sx]
|
||||
}
|
||||
}
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
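// --- Illustrative sketch (not part of the vendored file) ---
// The net effect of the YCbCrK branch of applyBlack above on a single sample:
// the YCbCr part is converted to RGB, the RGB bytes are stored directly as C,
// M and Y (the RGB->CMY inversion cancels the Adobe inversion), and only the
// black plane is inverted. Uses color.YCbCrToRGB from image/color:
func ycbcrkToCMYK(y, cb, cr, k uint8) (c, m, yy, kk uint8) {
	r, g, b := color.YCbCrToRGB(y, cb, cr)
	return r, g, b, 255 - k
}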
func (d *decoder) isRGB() bool {
|
||||
if d.jfif {
|
||||
return false
|
||||
}
|
||||
if d.adobeTransformValid && d.adobeTransform == adobeTransformUnknown {
|
||||
// https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
|
||||
// says that 0 means Unknown (and in practice RGB) and 1 means YCbCr.
|
||||
return true
|
||||
}
|
||||
return d.comp[0].c == 'R' && d.comp[1].c == 'G' && d.comp[2].c == 'B'
|
||||
}
|
||||
|
||||
func (d *decoder) convertToRGB() (image.Image, error) {
|
||||
cScale := d.comp[0].h / d.comp[1].h
|
||||
bounds := d.img3.Bounds()
|
||||
img := nrgb.NewNRGB(bounds)
|
||||
parallel.Run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
po := img.PixOffset(bounds.Min.X, y)
|
||||
yo := d.img3.YOffset(bounds.Min.X, y)
|
||||
co := d.img3.COffset(bounds.Min.X, y)
|
||||
for i, iMax := 0, bounds.Max.X-bounds.Min.X; i < iMax; i++ {
|
||||
img.Pix[po+3*i+0] = d.img3.Y[yo+i]
|
||||
img.Pix[po+3*i+1] = d.img3.Cb[co+i/cScale]
|
||||
img.Pix[po+3*i+2] = d.img3.Cr[co+i/cScale]
|
||||
}
|
||||
}
|
||||
}, bounds.Min.Y, bounds.Max.Y)
|
||||
return img, nil
|
||||
}
|
||||
|
||||
// Decode reads a JPEG image from r and returns it as an [image.Image].
|
||||
func Decode(r io.Reader) (image.Image, error) {
|
||||
var d decoder
|
||||
return d.decode(r, false)
|
||||
}
|
||||
|
||||
// DecodeConfig returns the color model and dimensions of a JPEG image without
|
||||
// decoding the entire image.
|
||||
func DecodeConfig(r io.Reader) (image.Config, error) {
|
||||
var d decoder
|
||||
if _, err := d.decode(r, true); err != nil {
|
||||
return image.Config{}, err
|
||||
}
|
||||
switch d.nComp {
|
||||
case 1:
|
||||
return image.Config{
|
||||
ColorModel: color.GrayModel,
|
||||
Width: d.width,
|
||||
Height: d.height,
|
||||
}, nil
|
||||
case 3:
|
||||
cm := color.YCbCrModel
|
||||
if d.isRGB() {
|
||||
cm = nrgb.Model
|
||||
}
|
||||
return image.Config{
|
||||
ColorModel: cm,
|
||||
Width: d.width,
|
||||
Height: d.height,
|
||||
}, nil
|
||||
case 4:
|
||||
return image.Config{
|
||||
ColorModel: color.CMYKModel,
|
||||
Width: d.width,
|
||||
Height: d.height,
|
||||
}, nil
|
||||
}
|
||||
return image.Config{}, FormatError("missing SOF marker")
|
||||
}
|
||||
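// --- Usage sketch (not part of the vendored file) ---
// Probing dimensions with DecodeConfig is cheap because decode(r, true)
// returns as soon as the relevant headers have been seen; Decode parses the
// whole stream. Assumes the package is imported as jpeg from
// github.com/kovidgoyal/imaging/jpeg and needs "fmt", "io" and "os":
func probeAndDecode(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	cfg, err := jpeg.DecodeConfig(f)
	if err != nil {
		return err
	}
	fmt.Printf("%dx%d\n", cfg.Width, cfg.Height)

	if _, err := f.Seek(0, io.SeekStart); err != nil { // rewind for a full decode
		return err
	}
	img, err := jpeg.Decode(f)
	if err != nil {
		return err
	}
	fmt.Println("bounds:", img.Bounds())
	return nil
}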
583
vendor/github.com/kovidgoyal/imaging/jpeg/scan.go
generated
vendored
Normal file
@@ -0,0 +1,583 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jpeg
|
||||
|
||||
import (
|
||||
"image"
|
||||
)
|
||||
|
||||
// makeImg allocates and initializes the destination image.
|
||||
func (d *decoder) makeImg(mxx, myy int) {
|
||||
if d.nComp == 1 {
|
||||
m := image.NewGray(image.Rect(0, 0, 8*mxx, 8*myy))
|
||||
d.img1 = m.SubImage(image.Rect(0, 0, d.width, d.height)).(*image.Gray)
|
||||
return
|
||||
}
|
||||
subsampleRatio := image.YCbCrSubsampleRatio444
|
||||
if d.comp[1].h != d.comp[2].h || d.comp[1].v != d.comp[2].v || d.maxH != d.comp[0].h || d.maxV != d.comp[0].v {
|
||||
d.flex = true
|
||||
} else {
|
||||
if d.force_flex {
|
||||
d.flex = true
|
||||
} else {
|
||||
hRatio := d.maxH / d.comp[1].h
|
||||
vRatio := d.maxV / d.comp[1].v
|
||||
switch hRatio<<4 | vRatio {
|
||||
case 0x11:
|
||||
subsampleRatio = image.YCbCrSubsampleRatio444
|
||||
case 0x12:
|
||||
subsampleRatio = image.YCbCrSubsampleRatio440
|
||||
case 0x21:
|
||||
subsampleRatio = image.YCbCrSubsampleRatio422
|
||||
case 0x22:
|
||||
subsampleRatio = image.YCbCrSubsampleRatio420
|
||||
case 0x41:
|
||||
subsampleRatio = image.YCbCrSubsampleRatio411
|
||||
case 0x42:
|
||||
subsampleRatio = image.YCbCrSubsampleRatio410
|
||||
default:
|
||||
d.flex = true
|
||||
}
|
||||
}
|
||||
}
|
||||
m := image.NewYCbCr(image.Rect(0, 0, 8*d.maxH*mxx, 8*d.maxV*myy), subsampleRatio)
|
||||
d.img3 = m.SubImage(image.Rect(0, 0, d.width, d.height)).(*image.YCbCr)
|
||||
|
||||
if d.nComp == 4 {
|
||||
h3, v3 := d.comp[3].h, d.comp[3].v
|
||||
d.blackPix = make([]byte, 8*h3*mxx*8*v3*myy)
|
||||
d.blackStride = 8 * h3 * mxx
|
||||
}
|
||||
}
|
||||
|
||||
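// --- Illustrative sketch (not part of the vendored file) ---
// An MCU covers 8*h0 x 8*v0 pixels, where h0 and v0 are the luma sampling
// factors, so the MCU grid used by processSOS below is a pair of ceiling
// divisions. For the 20x5 grayscale example mentioned in processSOF this
// gives a 3x1 grid of 8x8 MCUs; for a 4:2:0 colour image (h0 = v0 = 2) each
// MCU is 16x16 pixels.
func mcuGrid(width, height, h0, v0 int) (mxx, myy int) {
	mxx = (width + 8*h0 - 1) / (8 * h0)  // ceil(width / (8*h0))
	myy = (height + 8*v0 - 1) / (8 * v0) // ceil(height / (8*v0))
	return mxx, myy
}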
// Specified in section B.2.3.
|
||||
func (d *decoder) processSOS(n int) error {
|
||||
if d.nComp == 0 {
|
||||
return FormatError("missing SOF marker")
|
||||
}
|
||||
if n < 6 || 4+2*d.nComp < n || n%2 != 0 {
|
||||
return FormatError("SOS has wrong length")
|
||||
}
|
||||
if err := d.readFull(d.tmp[:n]); err != nil {
|
||||
return err
|
||||
}
|
||||
nComp := int(d.tmp[0])
|
||||
if n != 4+2*nComp {
|
||||
return FormatError("SOS length inconsistent with number of components")
|
||||
}
|
||||
var scan [maxComponents]struct {
|
||||
compIndex uint8
|
||||
td uint8 // DC table selector.
|
||||
ta uint8 // AC table selector.
|
||||
}
|
||||
totalHV := 0
|
||||
for i := range nComp {
|
||||
cs := d.tmp[1+2*i] // Component selector.
|
||||
compIndex := -1
|
||||
for j, comp := range d.comp[:d.nComp] {
|
||||
if cs == comp.c {
|
||||
compIndex = j
|
||||
}
|
||||
}
|
||||
if compIndex < 0 {
|
||||
return FormatError("unknown component selector")
|
||||
}
|
||||
scan[i].compIndex = uint8(compIndex)
|
||||
// Section B.2.3 states that "the value of Cs_j shall be different from
|
||||
// the values of Cs_1 through Cs_(j-1)". Since we have previously
|
||||
// verified that a frame's component identifiers (C_i values in section
|
||||
// B.2.2) are unique, it suffices to check that the implicit indexes
|
||||
// into d.comp are unique.
|
||||
for j := range i {
|
||||
if scan[i].compIndex == scan[j].compIndex {
|
||||
return FormatError("repeated component selector")
|
||||
}
|
||||
}
|
||||
totalHV += d.comp[compIndex].h * d.comp[compIndex].v
|
||||
|
||||
// The baseline t <= 1 restriction is specified in table B.3.
|
||||
scan[i].td = d.tmp[2+2*i] >> 4
|
||||
if t := scan[i].td; t > maxTh || (d.baseline && t > 1) {
|
||||
return FormatError("bad Td value")
|
||||
}
|
||||
scan[i].ta = d.tmp[2+2*i] & 0x0f
|
||||
if t := scan[i].ta; t > maxTh || (d.baseline && t > 1) {
|
||||
return FormatError("bad Ta value")
|
||||
}
|
||||
}
|
||||
// Section B.2.3 states that if there is more than one component then the
|
||||
// total H*V values in a scan must be <= 10.
|
||||
if d.nComp > 1 && totalHV > 10 {
|
||||
return FormatError("total sampling factors too large")
|
||||
}
|
||||
|
||||
// zigStart and zigEnd are the spectral selection bounds.
|
||||
// ah and al are the successive approximation high and low values.
|
||||
// The spec calls these values Ss, Se, Ah and Al.
|
||||
//
|
||||
// For progressive JPEGs, these are the two more-or-less independent
|
||||
// aspects of progression. Spectral selection progression is when not
|
||||
// all of a block's 64 DCT coefficients are transmitted in one pass.
|
||||
// For example, three passes could transmit coefficient 0 (the DC
|
||||
// component), coefficients 1-5, and coefficients 6-63, in zig-zag
|
||||
// order. Successive approximation is when not all of the bits of a
|
||||
// band of coefficients are transmitted in one pass. For example,
|
||||
// three passes could transmit the 6 most significant bits, followed
|
||||
// by the second-least significant bit, followed by the least
|
||||
// significant bit.
|
||||
//
|
||||
// For sequential JPEGs, these parameters are hard-coded to 0/63/0/0, as
|
||||
// per table B.3.
|
||||
zigStart, zigEnd, ah, al := int32(0), int32(blockSize-1), uint32(0), uint32(0)
|
||||
if d.progressive {
|
||||
zigStart = int32(d.tmp[1+2*nComp])
|
||||
zigEnd = int32(d.tmp[2+2*nComp])
|
||||
ah = uint32(d.tmp[3+2*nComp] >> 4)
|
||||
al = uint32(d.tmp[3+2*nComp] & 0x0f)
|
||||
if (zigStart == 0 && zigEnd != 0) || zigStart > zigEnd || blockSize <= zigEnd {
|
||||
return FormatError("bad spectral selection bounds")
|
||||
}
|
||||
if zigStart != 0 && nComp != 1 {
|
||||
return FormatError("progressive AC coefficients for more than one component")
|
||||
}
|
||||
if ah != 0 && ah != al+1 {
|
||||
return FormatError("bad successive approximation values")
|
||||
}
|
||||
}
|
||||
|
||||
// mxx and myy are the number of MCUs (Minimum Coded Units) in the image.
|
||||
h0, v0 := d.comp[0].h, d.comp[0].v // The h and v values from the Y components.
|
||||
mxx := (d.width + 8*h0 - 1) / (8 * h0)
|
||||
myy := (d.height + 8*v0 - 1) / (8 * v0)
|
||||
if d.img1 == nil && d.img3 == nil {
|
||||
d.makeImg(mxx, myy)
|
||||
}
|
||||
if d.progressive {
|
||||
for i := range nComp {
|
||||
compIndex := scan[i].compIndex
|
||||
if d.progCoeffs[compIndex] == nil {
|
||||
d.progCoeffs[compIndex] = make([]block, mxx*myy*d.comp[compIndex].h*d.comp[compIndex].v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d.bits = bits{}
|
||||
mcu, expectedRST := 0, uint8(rst0Marker)
|
||||
var (
|
||||
// b is the decoded coefficients, in natural (not zig-zag) order.
|
||||
b block
|
||||
dc [maxComponents]int32
|
||||
// bx and by are the location of the current block, in units of 8x8
|
||||
// blocks: the third block in the first row has (bx, by) = (2, 0).
|
||||
bx, by int
|
||||
blockCount int
|
||||
)
|
||||
for my := range myy {
|
||||
for mx := range mxx {
|
||||
for i := range nComp {
|
||||
compIndex := scan[i].compIndex
|
||||
hi := d.comp[compIndex].h
|
||||
vi := d.comp[compIndex].v
|
||||
for j := 0; j < hi*vi; j++ {
|
||||
// The blocks are traversed one MCU at a time. For 4:2:0 chroma
|
||||
// subsampling, there are four Y 8x8 blocks in every 16x16 MCU.
|
||||
//
|
||||
// For a sequential 32x16 pixel image, the Y blocks visiting order is:
|
||||
// 0 1 4 5
|
||||
// 2 3 6 7
|
||||
//
|
||||
// For progressive images, the interleaved scans (those with nComp > 1)
|
||||
// are traversed as above, but non-interleaved scans are traversed left
|
||||
// to right, top to bottom:
|
||||
// 0 1 2 3
|
||||
// 4 5 6 7
|
||||
// Only DC scans (zigStart == 0) can be interleaved. AC scans must have
|
||||
// only one component.
|
||||
//
|
||||
// To further complicate matters, for non-interleaved scans, there is no
|
||||
// data for any blocks that are inside the image at the MCU level but
|
||||
// outside the image at the pixel level. For example, a 24x16 pixel 4:2:0
|
||||
// progressive image consists of two 16x16 MCUs. The interleaved scans
|
||||
// will process 8 Y blocks:
|
||||
// 0 1 4 5
|
||||
// 2 3 6 7
|
||||
// The non-interleaved scans will process only 6 Y blocks:
|
||||
// 0 1 2
|
||||
// 3 4 5
|
||||
if nComp != 1 {
|
||||
bx = hi*mx + j%hi
|
||||
by = vi*my + j/hi
|
||||
} else {
|
||||
q := mxx * hi
|
||||
bx = blockCount % q
|
||||
by = blockCount / q
|
||||
blockCount++
|
||||
if bx*8 >= d.width || by*8 >= d.height {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Load the previous partially decoded coefficients, if applicable.
|
||||
if d.progressive {
|
||||
b = d.progCoeffs[compIndex][by*mxx*hi+bx]
|
||||
} else {
|
||||
b = block{}
|
||||
}
|
||||
|
||||
if ah != 0 {
|
||||
if err := d.refine(&b, &d.huff[acTable][scan[i].ta], zigStart, zigEnd, 1<<al); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
zig := zigStart
|
||||
if zig == 0 {
|
||||
zig++
|
||||
// Decode the DC coefficient, as specified in section F.2.2.1.
|
||||
value, err := d.decodeHuffman(&d.huff[dcTable][scan[i].td])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if value > 16 {
|
||||
return UnsupportedError("excessive DC component")
|
||||
}
|
||||
dcDelta, err := d.receiveExtend(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dc[compIndex] += dcDelta
|
||||
b[0] = dc[compIndex] << al
|
||||
}
|
||||
|
||||
if zig <= zigEnd && d.eobRun > 0 {
|
||||
d.eobRun--
|
||||
} else {
|
||||
// Decode the AC coefficients, as specified in section F.2.2.2.
|
||||
huff := &d.huff[acTable][scan[i].ta]
|
||||
for ; zig <= zigEnd; zig++ {
|
||||
value, err := d.decodeHuffman(huff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val0 := value >> 4
|
||||
val1 := value & 0x0f
|
||||
if val1 != 0 {
|
||||
zig += int32(val0)
|
||||
if zig > zigEnd {
|
||||
break
|
||||
}
|
||||
ac, err := d.receiveExtend(val1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b[unzig[zig]] = ac << al
|
||||
} else {
|
||||
if val0 != 0x0f {
|
||||
d.eobRun = uint16(1 << val0)
|
||||
if val0 != 0 {
|
||||
bits, err := d.decodeBits(int32(val0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.eobRun |= uint16(bits)
|
||||
}
|
||||
d.eobRun--
|
||||
break
|
||||
}
|
||||
zig += 0x0f
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if d.progressive {
|
||||
// Save the coefficients.
|
||||
d.progCoeffs[compIndex][by*mxx*hi+bx] = b
|
||||
// At this point, we could call reconstructBlock to dequantize and perform the
|
||||
// inverse DCT, to save early stages of a progressive image to the *image.YCbCr
|
||||
// buffers (the whole point of progressive encoding), but in Go, the jpeg.Decode
|
||||
// function does not return until the entire image is decoded, so we "continue"
|
||||
// here to avoid wasted computation. Instead, reconstructBlock is called on each
|
||||
// accumulated block by the reconstructProgressiveImage method after all of the
|
||||
// SOS markers are processed.
|
||||
continue
|
||||
}
|
||||
d.reconstructBlock(&b, bx, by, int(compIndex))
|
||||
} // for j
|
||||
} // for i
|
||||
mcu++
|
||||
if d.ri > 0 && mcu%d.ri == 0 && mcu < mxx*myy {
|
||||
// For well-formed input, the RST[0-7] restart marker follows
|
||||
// immediately. For corrupt input, call findRST to try to
|
||||
// resynchronize.
|
||||
if err := d.readFull(d.tmp[:2]); err != nil {
|
||||
return err
|
||||
} else if d.tmp[0] != 0xff || d.tmp[1] != expectedRST {
|
||||
if err := d.findRST(expectedRST); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
expectedRST++
|
||||
if expectedRST == rst7Marker+1 {
|
||||
expectedRST = rst0Marker
|
||||
}
|
||||
// Reset the Huffman decoder.
|
||||
d.bits = bits{}
|
||||
// Reset the DC components, as per section F.2.1.3.1.
|
||||
dc = [maxComponents]int32{}
|
||||
// Reset the progressive decoder state, as per section G.1.2.2.
|
||||
d.eobRun = 0
|
||||
}
|
||||
} // for mx
|
||||
} // for my
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
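// --- Illustrative sketch (not part of the vendored file) ---
// When a restart interval is set, processSOS above expects an RST marker
// after every d.ri MCUs; the markers cycle RST0..RST7 (0xd0..0xd7) and each
// one resets the bit reader, the DC predictors and the EOB run. The marker
// expected after the k-th interval (k counted from 1) is:
func expectedRSTMarker(k int) byte {
	const rst0 = 0xd0 // same value as rst0Marker
	return byte(rst0 + (k-1)%8)
}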
// refine decodes a successive approximation refinement block, as specified in
|
||||
// section G.1.2.
|
||||
func (d *decoder) refine(b *block, h *huffman, zigStart, zigEnd, delta int32) error {
|
||||
// Refining a DC component is trivial.
|
||||
if zigStart == 0 {
|
||||
if zigEnd != 0 {
|
||||
panic("unreachable")
|
||||
}
|
||||
bit, err := d.decodeBit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bit {
|
||||
b[0] |= delta
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Refining AC components is more complicated; see sections G.1.2.2 and G.1.2.3.
|
||||
zig := zigStart
|
||||
if d.eobRun == 0 {
|
||||
loop:
|
||||
for ; zig <= zigEnd; zig++ {
|
||||
z := int32(0)
|
||||
value, err := d.decodeHuffman(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val0 := value >> 4
|
||||
val1 := value & 0x0f
|
||||
|
||||
switch val1 {
|
||||
case 0:
|
||||
if val0 != 0x0f {
|
||||
d.eobRun = uint16(1 << val0)
|
||||
if val0 != 0 {
|
||||
bits, err := d.decodeBits(int32(val0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.eobRun |= uint16(bits)
|
||||
}
|
||||
break loop
|
||||
}
|
||||
case 1:
|
||||
z = delta
|
||||
bit, err := d.decodeBit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bit {
|
||||
z = -z
|
||||
}
|
||||
default:
|
||||
return FormatError("unexpected Huffman code")
|
||||
}
|
||||
|
||||
zig, err = d.refineNonZeroes(b, zig, zigEnd, int32(val0), delta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if zig > zigEnd {
|
||||
return FormatError("too many coefficients")
|
||||
}
|
||||
if z != 0 {
|
||||
b[unzig[zig]] = z
|
||||
}
|
||||
}
|
||||
}
|
||||
if d.eobRun > 0 {
|
||||
d.eobRun--
|
||||
if _, err := d.refineNonZeroes(b, zig, zigEnd, -1, delta); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// refineNonZeroes refines non-zero entries of b in zig-zag order. If nz >= 0,
|
||||
// the first nz zero entries are skipped over.
|
||||
func (d *decoder) refineNonZeroes(b *block, zig, zigEnd, nz, delta int32) (int32, error) {
|
||||
for ; zig <= zigEnd; zig++ {
|
||||
u := unzig[zig]
|
||||
if b[u] == 0 {
|
||||
if nz == 0 {
|
||||
break
|
||||
}
|
||||
nz--
|
||||
continue
|
||||
}
|
||||
bit, err := d.decodeBit()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !bit {
|
||||
continue
|
||||
}
|
||||
if b[u] >= 0 {
|
||||
b[u] += delta
|
||||
} else {
|
||||
b[u] -= delta
|
||||
}
|
||||
}
|
||||
return zig, nil
|
||||
}
|
||||
|
||||
func (d *decoder) reconstructProgressiveImage() error {
|
||||
// The h0, mxx, by and bx variables have the same meaning as in the
|
||||
// processSOS method.
|
||||
h0 := d.comp[0].h
|
||||
mxx := (d.width + 8*h0 - 1) / (8 * h0)
|
||||
for i := 0; i < d.nComp; i++ {
|
||||
if d.progCoeffs[i] == nil {
|
||||
continue
|
||||
}
|
||||
v := 8 * d.comp[0].v / d.comp[i].v
|
||||
h := 8 * d.comp[0].h / d.comp[i].h
|
||||
stride := mxx * d.comp[i].h
|
||||
for by := 0; by*v < d.height; by++ {
|
||||
for bx := 0; bx*h < d.width; bx++ {
|
||||
d.reconstructBlock(&d.progCoeffs[i][by*stride+bx], bx, by, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func level_shift(c int32) uint8 {
|
||||
if c < -128 {
|
||||
return 0
|
||||
}
|
||||
if c > 127 {
|
||||
return 255
|
||||
}
|
||||
return uint8(c + 128)
|
||||
}
|
||||
|
||||
func (d *decoder) storeFlexBlock(b *block, bx, by, compIndex int) {
|
||||
h, v := d.comp[compIndex].expand.h, d.comp[compIndex].expand.v
|
||||
dst, stride := []byte(nil), 0
|
||||
bx, by = bx*h, by*v
|
||||
switch compIndex {
|
||||
case 0:
|
||||
dst, stride = d.img3.Y[8*(by*d.img3.YStride+bx):], d.img3.YStride
|
||||
case 1:
|
||||
dst, stride = d.img3.Cb[8*(by*d.img3.CStride+bx):], d.img3.CStride
|
||||
case 2:
|
||||
dst, stride = d.img3.Cr[8*(by*d.img3.CStride+bx):], d.img3.CStride
|
||||
case 3:
|
||||
dst, stride = d.blackPix[8*(by*d.blackStride+bx):], d.blackStride
|
||||
}
|
||||
for y := range 8 {
|
||||
y8 := y * 8
|
||||
yv := y * v
|
||||
for x := range 8 {
|
||||
val := level_shift(b[y8+x])
|
||||
xh := x * h
|
||||
for yy := range v {
|
||||
for xx := range h {
|
||||
dst[(yv+yy)*stride+xh+xx] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reconstructBlock dequantizes, performs the inverse DCT and stores the block
|
||||
// to the image.
|
||||
func (d *decoder) reconstructBlock(b *block, bx, by, compIndex int) {
|
||||
qt := &d.quant[d.comp[compIndex].tq]
|
||||
for zig := range blockSize {
|
||||
b[unzig[zig]] *= qt[zig]
|
||||
}
|
||||
idct(b)
|
||||
dst, stride := []byte(nil), 0
|
||||
if d.nComp == 1 {
|
||||
dst, stride = d.img1.Pix[8*(by*d.img1.Stride+bx):], d.img1.Stride
|
||||
} else {
|
||||
if d.flex {
|
||||
d.storeFlexBlock(b, bx, by, compIndex)
|
||||
return
|
||||
}
|
||||
switch compIndex {
|
||||
case 0:
|
||||
dst, stride = d.img3.Y[8*(by*d.img3.YStride+bx):], d.img3.YStride
|
||||
case 1:
|
||||
dst, stride = d.img3.Cb[8*(by*d.img3.CStride+bx):], d.img3.CStride
|
||||
case 2:
|
||||
dst, stride = d.img3.Cr[8*(by*d.img3.CStride+bx):], d.img3.CStride
|
||||
case 3:
|
||||
dst, stride = d.blackPix[8*(by*d.blackStride+bx):], d.blackStride
|
||||
}
|
||||
}
|
||||
// Level shift by +128, clip to [0, 255], and write to dst.
|
||||
for y := range 8 {
|
||||
y8 := y * 8
|
||||
yStride := y * stride
|
||||
for x := range 8 {
|
||||
dst[yStride+x] = level_shift(b[y8+x])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// findRST advances past the next RST restart marker that matches expectedRST.
|
||||
// Other than I/O errors, it is also an error if we encounter an {0xFF, M}
|
||||
// two-byte marker sequence where M is not 0x00, 0xFF or the expectedRST.
|
||||
//
|
||||
// This is similar to libjpeg's jdmarker.c's next_marker function.
|
||||
// https://github.com/libjpeg-turbo/libjpeg-turbo/blob/2dfe6c0fe9e18671105e94f7cbf044d4a1d157e6/jdmarker.c#L892-L935
|
||||
//
|
||||
// Precondition: d.tmp[:2] holds the next two bytes of JPEG-encoded input
|
||||
// (input in the d.readFull sense).
|
||||
func (d *decoder) findRST(expectedRST uint8) error {
|
||||
for {
|
||||
// i is the index such that, at the bottom of the loop, we read 2-i
|
||||
// bytes into d.tmp[i:2], maintaining the invariant that d.tmp[:2]
|
||||
// holds the next two bytes of JPEG-encoded input. It is either 0 or 1,
|
||||
// so that each iteration advances by 1 or 2 bytes (or returns).
|
||||
i := 0
|
||||
|
||||
if d.tmp[0] == 0xff {
|
||||
if d.tmp[1] == expectedRST {
|
||||
return nil
|
||||
} else if d.tmp[1] == 0xff {
|
||||
i = 1
|
||||
} else if d.tmp[1] != 0x00 {
|
||||
// libjpeg's jdmarker.c's jpeg_resync_to_restart does something
|
||||
// fancy here, treating RST markers within two (modulo 8) of
|
||||
// expectedRST differently from RST markers that are 'more
|
||||
// distant'. Until we see evidence that recovering from such
|
||||
// cases is frequent enough to be worth the complexity, we take
|
||||
// a simpler approach for now. Any marker that's not 0x00, 0xff
|
||||
// or expectedRST is a fatal FormatError.
|
||||
return FormatError("bad RST marker")
|
||||
}
|
||||
|
||||
} else if d.tmp[1] == 0xff {
|
||||
d.tmp[0] = 0xff
|
||||
i = 1
|
||||
}
|
||||
|
||||
if err := d.readFull(d.tmp[i:2]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
528
vendor/github.com/kovidgoyal/imaging/magick/magick.go
generated
vendored
Normal file
@@ -0,0 +1,528 @@
|
||||
package magick
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"image"
|
||||
"image/color"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/gifmeta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/icc"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
var MagickExe = sync.OnceValue(func() string {
|
||||
ans, err := exec.LookPath("magick")
|
||||
if err != nil || ans == "" {
|
||||
ans = "magick"
|
||||
}
|
||||
return ans
|
||||
})
|
||||
var HasMagick = sync.OnceValue(func() bool { return MagickExe() != "magick" })
|
||||
|
||||
var TempDirInRAMIfPossible = sync.OnceValue(func() string { return get_temp_dir() })
|
||||
|
||||
type ImageFrame struct {
|
||||
Width, Height, Left, Top int
|
||||
Number int // 1-based number
|
||||
Compose_onto int // number of frame to compose onto
|
||||
Delay_ms int32 // negative for gapless frame, zero ignored, positive is number of ms
|
||||
Replace bool // do a replace rather than an alpha blend
|
||||
Is_opaque bool
|
||||
Img image.Image
|
||||
}
|
||||
|
||||
func check_resize(frame *ImageFrame, filename string) error {
|
||||
// ImageMagick sometimes generates RGBA images smaller than the specified
|
||||
// size. See https://github.com/kovidgoyal/kitty/issues/276 for examples
|
||||
s, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sz := int(s.Size())
|
||||
bytes_per_pixel := 4
|
||||
if frame.Is_opaque {
|
||||
bytes_per_pixel = 3
|
||||
}
|
||||
expected_size := bytes_per_pixel * frame.Width * frame.Height
|
||||
if sz < expected_size {
|
||||
if bytes_per_pixel == 4 && sz == 3*frame.Width*frame.Height {
|
||||
frame.Is_opaque = true
|
||||
return nil
|
||||
}
|
||||
missing := expected_size - sz
|
||||
if missing%(bytes_per_pixel*frame.Width) != 0 {
|
||||
return fmt.Errorf("ImageMagick failed to resize correctly. It generated %d < %d of data (w=%d h=%d bpp=%d frame-number: %d)", sz, expected_size, frame.Width, frame.Height, bytes_per_pixel, frame.Number)
|
||||
}
|
||||
frame.Height -= missing / (bytes_per_pixel * frame.Width)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func RunMagick(i *input, cmd []string) ([]byte, error) {
|
||||
cmd = append([]string{MagickExe()}, cmd...)
|
||||
c := exec.Command(cmd[0], cmd[1:]...)
|
||||
if i.os_file != nil {
|
||||
c.ExtraFiles = append(c.ExtraFiles, i.os_file)
|
||||
}
|
||||
output, err := c.Output()
|
||||
if err != nil {
|
||||
var exit_err *exec.ExitError
|
||||
if errors.As(err, &exit_err) {
|
||||
return nil, fmt.Errorf("Running the command: %s\nFailed with error:\n%s", strings.Join(cmd, " "), string(exit_err.Stderr))
|
||||
}
|
||||
return nil, fmt.Errorf("Could not find the program: %#v. Is ImageMagick installed and in your PATH?", cmd[0])
|
||||
}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
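// --- Illustrative sketch (not part of the vendored file) ---
// RunMagick above hands i.os_file to the child through ExtraFiles, and
// os/exec maps ExtraFiles[i] to file descriptor 3+i in the child on Unix.
// That is why magick_input_path (below) can pass the pseudo-filename "fd:3"
// as the input argument. Minimal standalone version (needs "os", "os/exec"):
func runWithExtraFD(f *os.File, name string, args ...string) ([]byte, error) {
	c := exec.Command(name, args...)
	c.ExtraFiles = []*os.File{f} // becomes fd 3 in the child
	return c.Output()
}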
type IdentifyOutput struct {
|
||||
Fmt, Canvas, Transparency, Gap, Index, Size, Dpi, Dispose, Orientation, Colorspace string
|
||||
}
|
||||
|
||||
type DisposeOp int
|
||||
|
||||
const (
|
||||
DisposeNone DisposeOp = iota
|
||||
DisposeBackground
|
||||
DisposePrevious
|
||||
)
|
||||
|
||||
type IdentifyRecord struct {
|
||||
Fmt_uppercase string
|
||||
Gap int
|
||||
Canvas struct{ Width, Height, Left, Top int }
|
||||
Width, Height int
|
||||
Dpi struct{ X, Y float64 }
|
||||
Index int
|
||||
Is_opaque bool
|
||||
Needs_blend bool
|
||||
Disposal DisposeOp
|
||||
Dimensions_swapped bool
|
||||
ColorSpace string
|
||||
}
|
||||
|
||||
func parse_identify_record(ans *IdentifyRecord, raw *IdentifyOutput) (err error) {
|
||||
ans.Fmt_uppercase = strings.ToUpper(raw.Fmt)
|
||||
if raw.Gap != "" {
|
||||
ans.Gap, err = strconv.Atoi(raw.Gap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid gap value in identify output: %s", raw.Gap)
|
||||
}
|
||||
ans.Gap = max(0, ans.Gap)
|
||||
}
|
||||
area, pos, found := strings.Cut(raw.Canvas, "+")
|
||||
ok := false
|
||||
if found {
|
||||
w, h, found := strings.Cut(area, "x")
|
||||
if found {
|
||||
ans.Canvas.Width, err = strconv.Atoi(w)
|
||||
if err == nil {
|
||||
ans.Canvas.Height, err = strconv.Atoi(h)
|
||||
if err == nil {
|
||||
x, y, found := strings.Cut(pos, "+")
|
||||
if found {
|
||||
ans.Canvas.Left, err = strconv.Atoi(x)
|
||||
if err == nil {
|
||||
if ans.Canvas.Top, err = strconv.Atoi(y); err == nil {
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf("Invalid canvas value in identify output: %s", raw.Canvas)
|
||||
}
|
||||
w, h, found := strings.Cut(raw.Size, "x")
|
||||
ok = false
|
||||
if found {
|
||||
ans.Width, err = strconv.Atoi(w)
|
||||
if err == nil {
|
||||
if ans.Height, err = strconv.Atoi(h); err == nil {
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf("Invalid size value in identify output: %s", raw.Size)
|
||||
}
|
||||
x, y, found := strings.Cut(raw.Dpi, "x")
|
||||
ok = false
|
||||
if found {
|
||||
ans.Dpi.X, err = strconv.ParseFloat(x, 64)
|
||||
if err == nil {
|
||||
if ans.Dpi.Y, err = strconv.ParseFloat(y, 64); err == nil {
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf("Invalid dpi value in identify output: %s", raw.Dpi)
|
||||
}
|
||||
ans.Index, err = strconv.Atoi(raw.Index)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid index value in identify output: %s", raw.Index)
|
||||
}
|
||||
q := strings.ToLower(raw.Transparency)
|
||||
if q == "blend" || q == "true" {
|
||||
ans.Is_opaque = false
|
||||
} else {
|
||||
ans.Is_opaque = true
|
||||
}
|
||||
ans.Needs_blend = q == "blend"
|
||||
switch strings.ToLower(raw.Dispose) {
|
||||
case "none", "undefined":
|
||||
ans.Disposal = DisposeNone
|
||||
case "background":
|
||||
ans.Disposal = DisposeBackground
|
||||
case "previous":
|
||||
ans.Disposal = DisposePrevious
|
||||
default:
|
||||
return fmt.Errorf("Invalid value for dispose: %s", raw.Dispose)
|
||||
}
|
||||
ans.ColorSpace = raw.Colorspace
|
||||
switch raw.Orientation {
|
||||
case "5", "6", "7", "8":
|
||||
ans.Dimensions_swapped = true
|
||||
}
|
||||
if ans.Dimensions_swapped {
|
||||
ans.Canvas.Width, ans.Canvas.Height = ans.Canvas.Height, ans.Canvas.Width
|
||||
ans.Width, ans.Height = ans.Height, ans.Width
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
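// --- Illustrative sketch (not part of the vendored file) ---
// parse_identify_record above pulls the canvas geometry apart with nested
// strings.Cut calls. The same parse written flat, for geometry strings of
// the form "WxH+X+Y" with non-negative offsets, as produced by the %g format
// (needs "fmt", "strconv" and "strings"):
func parseGeometry(s string) (w, h, x, y int, err error) {
	bad := func() (int, int, int, int, error) {
		return 0, 0, 0, 0, fmt.Errorf("invalid geometry: %s", s)
	}
	area, pos, ok := strings.Cut(s, "+")
	if !ok {
		return bad()
	}
	ws, hs, ok := strings.Cut(area, "x")
	if !ok {
		return bad()
	}
	xs, ys, ok := strings.Cut(pos, "+")
	if !ok {
		return bad()
	}
	if w, err = strconv.Atoi(ws); err != nil {
		return
	}
	if h, err = strconv.Atoi(hs); err != nil {
		return
	}
	if x, err = strconv.Atoi(xs); err != nil {
		return
	}
	y, err = strconv.Atoi(ys)
	return
}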
func identify(path *input) (ans []IdentifyRecord, err error) {
|
||||
cmd := []string{"identify"}
|
||||
q := `{"fmt":"%m","canvas":"%g","transparency":"%A","gap":"%T","index":"%p","size":"%wx%h",` +
|
||||
`"dpi":"%xx%y","dispose":"%D","orientation":"%[EXIF:Orientation]","colorspace":"%[colorspace]"},`
|
||||
cmd = append(cmd, "-format", q, "--", path.arg)
|
||||
output, err := RunMagick(path, cmd)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to identify image at path: %s with error: %w", path, err)
|
||||
}
|
||||
output = bytes.TrimRight(bytes.TrimSpace(output), ",")
|
||||
raw_json := make([]byte, 0, len(output)+2)
|
||||
raw_json = append(raw_json, '[')
|
||||
raw_json = append(raw_json, output...)
|
||||
raw_json = append(raw_json, ']')
|
||||
var records []IdentifyOutput
|
||||
err = json.Unmarshal(raw_json, &records)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("The ImageMagick identify program returned malformed output for the image at path: %s, with error: %w", path, err)
|
||||
}
|
||||
ans = make([]IdentifyRecord, len(records))
|
||||
for i, rec := range records {
|
||||
err = parse_identify_record(&ans[i], &rec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
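// --- Illustrative sketch (not part of the vendored file) ---
// The identify helper above asks magick to print one JSON object per frame,
// each followed by a comma; the trailing comma is trimmed and the result is
// wrapped in brackets before unmarshalling. The same steps applied to a
// hand-written sample (field values are made up; needs "bytes" and
// "encoding/json"):
const sampleIdentifyOutput = `{"fmt":"GIF","canvas":"64x64+0+0","transparency":"Blend","gap":"5","index":"0","size":"64x64","dpi":"72x72","dispose":"None","orientation":"","colorspace":"sRGB"},
{"fmt":"GIF","canvas":"64x64+0+0","transparency":"Blend","gap":"5","index":"1","size":"32x32","dpi":"72x72","dispose":"Background","orientation":"","colorspace":"sRGB"},`

func parseIdentifySample() ([]IdentifyOutput, error) {
	raw := bytes.TrimRight(bytes.TrimSpace([]byte(sampleIdentifyOutput)), ",")
	doc := append(append([]byte{'['}, raw...), ']')
	var recs []IdentifyOutput
	err := json.Unmarshal(doc, &recs)
	return recs, err // two records, one per frame
}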
type RenderOptions struct {
|
||||
Background *color.RGBA64
|
||||
ResizeTo image.Point
|
||||
OnlyFirstFrame bool
|
||||
AutoOrient bool
|
||||
ToSRGB bool
|
||||
Transform types.TransformType
|
||||
RenderingIntent icc.RenderingIntent
|
||||
BlackpointCompensation bool
|
||||
}
|
||||
|
||||
func rgba64ToImageMagick(c_ color.RGBA64) string {
|
||||
c := color.NRGBA64Model.Convert(c_).(color.NRGBA64)
|
||||
rPercent := float64(c.R) / 65535.0 * 100.0
|
||||
gPercent := float64(c.G) / 65535.0 * 100.0
|
||||
bPercent := float64(c.B) / 65535.0 * 100.0
|
||||
alpha := float64(c.A) / 65535.0
|
||||
return fmt.Sprintf("rgba(%.3f%%,%.3f%%,%.3f%%,%.4f)", rPercent, gPercent, bPercent, alpha)
|
||||
}
|
||||
|
||||
func is_not_srgb(name string) bool {
|
||||
return name != "" && strings.ToUpper(name) != "SRGB"
|
||||
}
|
||||
|
||||
func render(path *input, ro *RenderOptions, is_srgb bool, frames []IdentifyRecord) (ans []*ImageFrame, err error) {
|
||||
cmd := []string{}
|
||||
add_alpha_remove := false
|
||||
if ro.Background == nil {
|
||||
cmd = append(cmd, "-background", "none")
|
||||
} else {
|
||||
if ro.Background.A == 0xffff {
|
||||
n := nrgb.Model.Convert(*ro.Background).(nrgb.Color)
|
||||
add_alpha_remove = true
|
||||
cmd = append(cmd, "-background", n.AsSharp())
|
||||
} else {
|
||||
cmd = append(cmd, "-background", rgba64ToImageMagick(*ro.Background))
|
||||
}
|
||||
}
|
||||
cpath := path.arg
|
||||
if ro.OnlyFirstFrame {
|
||||
cpath += "[0]"
|
||||
}
|
||||
has_multiple_frames := len(frames) > 1
|
||||
get_multiple_frames := has_multiple_frames && !ro.OnlyFirstFrame
|
||||
cmd = append(cmd, "--", cpath)
|
||||
if ro.AutoOrient {
|
||||
cmd = append(cmd, "-auto-orient")
|
||||
}
|
||||
if add_alpha_remove {
|
||||
cmd = append(cmd, "-alpha", "remove")
|
||||
} else if ro.Background != nil {
|
||||
cmd = append(cmd, "-flatten")
|
||||
}
|
||||
switch ro.Transform {
|
||||
case types.FlipHTransform:
|
||||
cmd = append(cmd, "-flop")
|
||||
case types.FlipVTransform:
|
||||
cmd = append(cmd, "-flip")
|
||||
case types.TransposeTransform:
|
||||
cmd = append(cmd, "-transpose")
|
||||
case types.TransverseTransform:
|
||||
cmd = append(cmd, "-transverse")
|
||||
case types.Rotate90Transform:
|
||||
cmd = append(cmd, "-rotate", "-90")
|
||||
case types.Rotate180Transform:
|
||||
cmd = append(cmd, "-rotate", "180")
|
||||
case types.Rotate270Transform:
|
||||
cmd = append(cmd, "-rotate", "-270")
|
||||
}
|
||||
tdir, err := os.MkdirTemp(TempDirInRAMIfPossible(), "")
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to create temporary directory to hold ImageMagick output with error: %w", err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tdir)
|
||||
if ro.ToSRGB && !is_srgb {
|
||||
profile_path := filepath.Join(tdir, "sRGB.icc")
|
||||
if err = os.WriteFile(profile_path, icc.Srgb_xyz_profile_data, 0o666); err != nil {
|
||||
return nil, fmt.Errorf("failed to create temporary file with profile for ImageMagick with error: %w", err)
|
||||
}
|
||||
|
||||
cmd = append(cmd, icc.IfElse(ro.BlackpointCompensation, "-", "+")+"black-point-compensation")
|
||||
cmd = append(cmd, "-intent", ro.RenderingIntent.String())
|
||||
cmd = append(cmd, "-profile", profile_path)
|
||||
}
|
||||
if ro.ResizeTo.X > 0 {
|
||||
rcmd := []string{"-resize", fmt.Sprintf("%dx%d!", ro.ResizeTo.X, ro.ResizeTo.Y)}
|
||||
if get_multiple_frames {
|
||||
cmd = append(cmd, "-coalesce")
|
||||
cmd = append(cmd, rcmd...)
|
||||
cmd = append(cmd, "-deconstruct")
|
||||
} else {
|
||||
cmd = append(cmd, rcmd...)
|
||||
}
|
||||
}
|
||||
cmd = append(cmd, "-depth", "8", "-set", "filename:f", "%w-%h-%g-%p")
|
||||
if get_multiple_frames {
|
||||
cmd = append(cmd, "+adjoin")
|
||||
}
|
||||
mode := "rgba"
|
||||
if frames[0].Is_opaque {
|
||||
mode = "rgb"
|
||||
}
|
||||
cmd = append(cmd, filepath.Join(tdir, "im-%[filename:f]."+mode))
|
||||
_, err = RunMagick(path, cmd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
entries, err := os.ReadDir(tdir)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Failed to read temp dir used to store ImageMagick output with error: %w", err)
|
||||
return
|
||||
}
|
||||
gaps := make([]int, len(frames))
|
||||
for i, frame := range frames {
|
||||
gaps[i] = frame.Gap
|
||||
}
|
||||
// although ImageMagick *might* already be taking care of this adjustment,
// I don't know for sure, so do it anyway.
|
||||
min_gap := gifmeta.CalcMinimumGap(gaps)
|
||||
for _, entry := range entries {
|
||||
fname := entry.Name()
|
||||
p, _, _ := strings.Cut(fname, ".")
|
||||
parts := strings.Split(p, "-")
|
||||
if len(parts) < 5 {
|
||||
continue
|
||||
}
|
||||
index, cerr := strconv.Atoi(parts[len(parts)-1])
|
||||
if cerr != nil || index < 0 || index >= len(frames) {
|
||||
continue
|
||||
}
|
||||
width, cerr := strconv.Atoi(parts[1])
|
||||
if cerr != nil {
|
||||
continue
|
||||
}
|
||||
height, cerr := strconv.Atoi(parts[2])
|
||||
if cerr != nil {
|
||||
continue
|
||||
}
|
||||
_, pos, found := strings.Cut(parts[3], "+")
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
px, py, found := strings.Cut(pos, "+")
|
||||
if !found {
|
||||
// ImageMagick is a buggy POS
|
||||
px, py = "0", "0"
|
||||
}
|
||||
x, cerr := strconv.Atoi(px)
|
||||
if cerr != nil {
|
||||
continue
|
||||
}
|
||||
y, cerr := strconv.Atoi(py)
|
||||
if cerr != nil {
|
||||
continue
|
||||
}
|
||||
identify_data := frames[index]
|
||||
path := filepath.Join(tdir, fname)
|
||||
frame := ImageFrame{
|
||||
Number: index + 1, Width: width, Height: height, Left: x, Top: y, Is_opaque: identify_data.Is_opaque,
|
||||
}
|
||||
frame.Delay_ms = int32(max(min_gap, identify_data.Gap) * 10)
|
||||
err = check_resize(&frame, path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to read temp file for image %#v with error: %w", path, err)
|
||||
}
|
||||
dest_rect := image.Rect(0, 0, frame.Width, frame.Height)
|
||||
if frame.Is_opaque {
|
||||
frame.Img = &nrgb.Image{Pix: data, Stride: frame.Width * 3, Rect: dest_rect}
|
||||
} else {
|
||||
frame.Img = &image.NRGBA{Pix: data, Stride: frame.Width * 4, Rect: dest_rect}
|
||||
}
|
||||
ans = append(ans, &frame)
|
||||
}
|
||||
if len(ans) < len(frames) {
|
||||
err = fmt.Errorf("Failed to render %d out of %d frames", len(frames)-len(ans), len(frames))
|
||||
return
|
||||
}
|
||||
slices.SortFunc(ans, func(a, b *ImageFrame) int { return a.Number - b.Number })
|
||||
prev_disposal := DisposeBackground
|
||||
prev_compose_onto := 0
|
||||
for i, frame := range ans {
|
||||
switch prev_disposal {
|
||||
case DisposeNone:
|
||||
frame.Compose_onto = frame.Number - 1
|
||||
case DisposePrevious:
|
||||
frame.Compose_onto = prev_compose_onto
|
||||
}
|
||||
prev_disposal, prev_compose_onto = frames[i].Disposal, frame.Compose_onto
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
Frames []*ImageFrame
|
||||
Format_uppercase string
|
||||
}
|
||||
|
||||
type input struct {
|
||||
arg string
|
||||
os_file *os.File
|
||||
needs_close bool
|
||||
needs_remove string
|
||||
}
|
||||
|
||||
func (i input) String() string {
|
||||
return i.arg
|
||||
}
|
||||
|
||||
func magick_input_path(i *types.Input) (inp *input, err error) {
|
||||
if i.Path != "" {
|
||||
return &input{arg: i.Path}, nil
|
||||
}
|
||||
r := i.Reader
|
||||
if s, ok := r.(*os.File); ok {
|
||||
if _, serr := s.Seek(0, io.SeekCurrent); serr == nil {
|
||||
if s.Name() != "" {
|
||||
if _, serr := os.Stat(s.Name()); serr == nil {
|
||||
return &input{arg: s.Name()}, nil
|
||||
}
|
||||
}
|
||||
if runtime.GOOS != "windows" {
|
||||
return &input{arg: "fd:3", os_file: s}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
data, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if f, merr := memfd(data); merr == nil {
|
||||
return &input{arg: "fd:3", os_file: f, needs_close: true}, nil
|
||||
}
|
||||
f, err := os.CreateTemp(TempDirInRAMIfPossible(), "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = f.Write(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &input{arg: f.Name(), needs_remove: f.Name()}, nil
|
||||
}
|
||||
|
||||
func OpenAll(input *types.Input, md *meta.Data, callback func(w, h int) RenderOptions) (ans *Image, err error) {
|
||||
if !HasMagick() {
|
||||
return nil, fmt.Errorf("the magick command as not found in PATH")
|
||||
}
|
||||
// ImageMagick needs to be told explicitly to use APNG, otherwise it returns only the first frame
|
||||
i, err := magick_input_path(input)
|
||||
if i.os_file != nil && i.needs_close {
|
||||
defer i.os_file.Close()
|
||||
}
|
||||
if i.needs_remove != "" {
|
||||
defer os.Remove(i.needs_remove)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is_apng := md != nil && md.Format == types.PNG && md.HasFrames
|
||||
if is_apng {
|
||||
i.arg = "APNG:" + i.arg
|
||||
}
|
||||
identify_records, err := identify(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is_srgb := !is_not_srgb(identify_records[0].ColorSpace)
|
||||
if is_srgb && md != nil {
|
||||
// ImageMagick cannot reliably identify color profiles, so trust the parsed metadata instead
|
||||
is_srgb = md.IsSRGB()
|
||||
}
|
||||
ro := callback(identify_records[0].Canvas.Width, identify_records[0].Canvas.Height)
|
||||
frames, err := render(i, &ro, is_srgb, identify_records)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans = &Image{
|
||||
Format_uppercase: identify_records[0].Fmt_uppercase, Frames: frames,
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
26
vendor/github.com/kovidgoyal/imaging/magick/magick_unix.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
//go:build !windows
|
||||
|
||||
package magick
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/kovidgoyal/go-shm"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func get_temp_dir() string {
|
||||
if shm.SHM_DIR != "" {
|
||||
tempFile, err := os.CreateTemp(shm.SHM_DIR, "write_check_*")
|
||||
if err != nil {
|
||||
return os.TempDir()
|
||||
}
|
||||
tempFile.Close()
|
||||
os.Remove(tempFile.Name())
|
||||
return shm.SHM_DIR
|
||||
}
|
||||
return os.TempDir()
|
||||
|
||||
}
|
||||
14
vendor/github.com/kovidgoyal/imaging/magick/magick_windows.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
//go:build windows
|
||||
|
||||
package magick
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func get_temp_dir() string {
|
||||
return os.TempDir()
|
||||
}
|
||||
29
vendor/github.com/kovidgoyal/imaging/magick/memfd_linux.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
//go:build linux
|
||||
|
||||
package magick
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func memfd(data []byte) (ans *os.File, err error) {
|
||||
fd, err := unix.MemfdCreate("memfile", unix.O_CLOEXEC)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = unix.Write(fd, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = unix.Seek(fd, 0, unix.SEEK_SET)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans = os.NewFile(uintptr(fd), "memfile")
|
||||
return
|
||||
}
|
||||
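The Linux memfd helper above wraps memfd_create so image bytes never touch disk. A hedged sketch of handing such a file to a child process as fd 3 (matching the "fd:3" argument built by magick_input_path), assuming it sits in the same magick package and a magick binary is on PATH:
package magick

import (
	"os"
	"os/exec"
)

// runMagickFromMemory is illustrative only: it feeds in-memory image data to the
// magick CLI via the memfd helper defined above.
func runMagickFromMemory(data []byte) error {
	f, err := memfd(data)
	if err != nil {
		return err
	}
	defer f.Close()
	cmd := exec.Command("magick", "fd:3", "info:")
	cmd.ExtraFiles = []*os.File{f} // ExtraFiles[0] is inherited by the child as fd 3
	cmd.Stdout = os.Stdout
	return cmd.Run()
}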
14
vendor/github.com/kovidgoyal/imaging/magick/memfd_other.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
//go:build !linux
|
||||
|
||||
package magick
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func memfd(data []byte) (ans *os.File, err error) {
|
||||
return nil, fmt.Errorf("ENOSYS")
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package imaging
|
||||
package netpbm
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@@ -9,6 +9,10 @@ import (
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/kovidgoyal/go-parallel"
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
@@ -88,6 +92,9 @@ func read_ppm_header(br *bufio.Reader, magic string) (ans header, err error) {
|
||||
ans.width = fields[0]
|
||||
ans.height = fields[1]
|
||||
if required_num_fields > 2 {
|
||||
if fields[2] > 65535 {
|
||||
return ans, fmt.Errorf("header specifies a maximum value %d larger than 65535", ans.maxval)
|
||||
}
|
||||
ans.maxval = uint32(fields[2])
|
||||
}
|
||||
if ans.maxval > 65535 {
|
||||
@@ -243,15 +250,15 @@ func decode_rgb_ascii(br *bufio.Reader, h header) (ans []byte, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func DecodeNetPBMConfig(r io.Reader) (cfg image.Config, err error) {
|
||||
func DecodeConfigAndFormat(r io.Reader) (cfg image.Config, fmt types.Format, err error) {
|
||||
br := bufio.NewReader(r)
|
||||
h, err := read_header(br)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
return cfg, types.UNKNOWN, err
|
||||
}
|
||||
cfg.Width = int(h.width)
|
||||
cfg.Height = int(h.height)
|
||||
cfg.ColorModel = NRGBModel
|
||||
cfg.ColorModel = nrgb.Model
|
||||
switch h.data_type {
|
||||
case blackwhite, grayscale:
|
||||
if h.has_alpha {
|
||||
@@ -278,10 +285,25 @@ func DecodeNetPBMConfig(r io.Reader) (cfg image.Config, err error) {
|
||||
if h.maxval > 255 {
|
||||
cfg.ColorModel = color.NRGBA64Model
|
||||
} else {
|
||||
cfg.ColorModel = NRGBModel
|
||||
cfg.ColorModel = nrgb.Model
|
||||
}
|
||||
}
|
||||
}
|
||||
switch h.format {
|
||||
case "P7":
|
||||
fmt = types.PAM
|
||||
case "P1", "P4":
|
||||
fmt = types.PBM
|
||||
case "P2", "P5":
|
||||
fmt = types.PGM
|
||||
case "P3", "P6":
|
||||
fmt = types.PPM
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func DecodeConfig(r io.Reader) (cfg image.Config, err error) {
|
||||
cfg, _, err = DecodeConfigAndFormat(r)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -337,7 +359,7 @@ func rescale(v uint32, num, den uint32) uint32 {
|
||||
}
|
||||
|
||||
func rescale_binary_data(b []uint8, num, den uint32) error {
|
||||
return run_in_parallel_over_range(0, func(start, end int) {
|
||||
return parallel.Run_in_parallel_over_range(0, func(start, end int) {
|
||||
for i := start; i < end; i++ {
|
||||
b[i] = uint8(rescale(uint32(b[i]), num, den))
|
||||
}
|
||||
@@ -348,7 +370,7 @@ func rescale_binary_data16(b []uint8, num, den uint32) error {
|
||||
if len(b)&1 != 0 {
|
||||
return fmt.Errorf("pixel data is not a multiple of two but uses 16 bits per channel")
|
||||
}
|
||||
return run_in_parallel_over_range(0, func(start, end int) {
|
||||
return parallel.Run_in_parallel_over_range(0, func(start, end int) {
|
||||
start *= 2
|
||||
end *= 2
|
||||
for i := start; i < end; i += 2 {
|
||||
@@ -394,7 +416,7 @@ func decode_binary_data(br *bufio.Reader, h header) (ans image.Image, err error)
|
||||
if h.maxval > 255 {
|
||||
g := image.NewNRGBA64(r)
|
||||
b := g.Pix
|
||||
if err = run_in_parallel_over_range(0, func(start, end int) {
|
||||
if err = parallel.Run_in_parallel_over_range(0, func(start, end int) {
|
||||
for i := start; i < end; i++ {
|
||||
src := binary_data[i*4 : i*4+4]
|
||||
dest := b[i*8 : i*8+8]
|
||||
@@ -408,7 +430,7 @@ func decode_binary_data(br *bufio.Reader, h header) (ans image.Image, err error)
|
||||
}
|
||||
g := image.NewNRGBA(r)
|
||||
b := g.Pix
|
||||
if err = run_in_parallel_over_range(0, func(start, end int) {
|
||||
if err = parallel.Run_in_parallel_over_range(0, func(start, end int) {
|
||||
for i := start; i < end; i++ {
|
||||
src := binary_data[i*2 : i*2+2]
|
||||
dest := b[i*4 : i*4+4]
|
||||
@@ -423,7 +445,7 @@ func decode_binary_data(br *bufio.Reader, h header) (ans image.Image, err error)
|
||||
if h.maxval > 255 {
|
||||
g := image.NewNRGBA64(r)
|
||||
b := g.Pix
|
||||
if err = run_in_parallel_over_range(0, func(start, end int) {
|
||||
if err = parallel.Run_in_parallel_over_range(0, func(start, end int) {
|
||||
for i := start; i < end; i++ {
|
||||
src := binary_data[i*6 : i*6+6]
|
||||
dest := b[i*8 : i*8+8]
|
||||
@@ -435,7 +457,7 @@ func decode_binary_data(br *bufio.Reader, h header) (ans image.Image, err error)
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
return NewNRGBWithContiguousRGBPixels(binary_data, 0, 0, r.Dx(), r.Dy())
|
||||
return nrgb.NewNRGBWithContiguousRGBPixels(binary_data, 0, 0, r.Dx(), r.Dy())
|
||||
case 4:
|
||||
// RGB with alpha
|
||||
if h.maxval <= 255 {
|
||||
@@ -449,7 +471,7 @@ func decode_binary_data(br *bufio.Reader, h header) (ans image.Image, err error)
|
||||
|
||||
// Decode decodes a PPM image from r and returns it as an image.Image.
|
||||
// Supports both P3 (ASCII) and P6 (binary) variants.
|
||||
func DecodeNetPBM(r io.Reader) (img image.Image, err error) {
|
||||
func Decode(r io.Reader) (img image.Image, err error) {
|
||||
br := bufio.NewReader(r)
|
||||
h, err := read_header(br)
|
||||
if err != nil {
|
||||
@@ -467,7 +489,7 @@ func DecodeNetPBM(r io.Reader) (img image.Image, err error) {
|
||||
return nil, err
|
||||
}
|
||||
if h.maxval <= 255 {
|
||||
return NewNRGBWithContiguousRGBPixels(vals, 0, 0, int(h.width), int(h.height))
|
||||
return nrgb.NewNRGBWithContiguousRGBPixels(vals, 0, 0, int(h.width), int(h.height))
|
||||
}
|
||||
return &image.NRGBA64{Pix: vals, Stride: int(h.width) * 8, Rect: image.Rect(0, 0, int(h.width), int(h.height))}, nil
|
||||
case "P4":
|
||||
@@ -500,11 +522,11 @@ func DecodeNetPBM(r io.Reader) (img image.Image, err error) {
|
||||
|
||||
// Register this decoder with Go's image package
|
||||
func init() {
|
||||
image.RegisterFormat("pbm", "P1", DecodeNetPBM, DecodeNetPBMConfig)
|
||||
image.RegisterFormat("pgm", "P2", DecodeNetPBM, DecodeNetPBMConfig)
|
||||
image.RegisterFormat("ppm", "P3", DecodeNetPBM, DecodeNetPBMConfig)
|
||||
image.RegisterFormat("pbm", "P4", DecodeNetPBM, DecodeNetPBMConfig)
|
||||
image.RegisterFormat("pgm", "P5", DecodeNetPBM, DecodeNetPBMConfig)
|
||||
image.RegisterFormat("ppm", "P6", DecodeNetPBM, DecodeNetPBMConfig)
|
||||
image.RegisterFormat("pam", "P7", DecodeNetPBM, DecodeNetPBMConfig)
|
||||
image.RegisterFormat("pbm", "P1", Decode, DecodeConfig)
|
||||
image.RegisterFormat("pgm", "P2", Decode, DecodeConfig)
|
||||
image.RegisterFormat("ppm", "P3", Decode, DecodeConfig)
|
||||
image.RegisterFormat("pbm", "P4", Decode, DecodeConfig)
|
||||
image.RegisterFormat("pgm", "P5", Decode, DecodeConfig)
|
||||
image.RegisterFormat("ppm", "P6", Decode, DecodeConfig)
|
||||
image.RegisterFormat("pam", "P7", Decode, DecodeConfig)
|
||||
}
|
||||
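With the decoders registered in the init function above, NetPBM data can be decoded through the standard library's image package. A minimal sketch, assuming the package's import path is github.com/kovidgoyal/imaging/netpbm and using a hypothetical 1x1 red P6 sample:
package main

import (
	"bytes"
	"fmt"
	"image"

	_ "github.com/kovidgoyal/imaging/netpbm" // registers pbm/pgm/ppm/pam decoders (path assumed)
)

func main() {
	ppm := []byte("P6\n1 1\n255\n\xff\x00\x00") // hypothetical 1x1 red pixel
	img, format, err := image.Decode(bytes.NewReader(ppm))
	if err != nil {
		panic(err)
	}
	fmt.Println(format, img.Bounds()) // ppm (0,0)-(1,1)
}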
@@ -1,26 +1,24 @@
|
||||
package imaging
|
||||
package nrgb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"image"
|
||||
"image/color"
|
||||
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type NRGBColor struct {
|
||||
type Color struct {
|
||||
R, G, B uint8
|
||||
}
|
||||
|
||||
func (c NRGBColor) AsSharp() string {
|
||||
func (c Color) AsSharp() string {
|
||||
return fmt.Sprintf("#%02X%02X%02X", c.R, c.G, c.B)
|
||||
}
|
||||
|
||||
func (c NRGBColor) String() string {
|
||||
return fmt.Sprintf("NRGBColor{%02X %02X %02X}", c.R, c.G, c.B)
|
||||
}
|
||||
|
||||
func (c NRGBColor) RGBA() (r, g, b, a uint32) {
|
||||
func (c Color) RGBA() (r, g, b, a uint32) {
|
||||
r = uint32(c.R)
|
||||
r |= r << 8
|
||||
g = uint32(c.G)
|
||||
@@ -31,8 +29,8 @@ func (c NRGBColor) RGBA() (r, g, b, a uint32) {
|
||||
return
|
||||
}
|
||||
|
||||
// NRGB is an in-memory image whose At method returns NRGBColor values.
|
||||
type NRGB struct {
|
||||
// Image is an in-memory image whose At method returns Color values.
|
||||
type Image struct {
|
||||
// Pix holds the image's pixels, in R, G, B order. The pixel at
|
||||
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3].
|
||||
Pix []uint8
|
||||
@@ -43,62 +41,65 @@ type NRGB struct {
|
||||
}
|
||||
|
||||
func nrgbModel(c color.Color) color.Color {
|
||||
if _, ok := c.(NRGBColor); ok {
|
||||
switch q := c.(type) {
|
||||
case Color:
|
||||
return c
|
||||
case color.NRGBA:
|
||||
return Color{q.R, q.G, q.B}
|
||||
case color.NRGBA64:
|
||||
return Color{uint8(q.R >> 8), uint8(q.G >> 8), uint8(q.B >> 8)}
|
||||
}
|
||||
r, g, b, a := c.RGBA()
|
||||
switch a {
|
||||
case 0xffff:
|
||||
return NRGBColor{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)}
|
||||
return Color{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)}
|
||||
case 0:
|
||||
return NRGBColor{0, 0, 0}
|
||||
return Color{0, 0, 0}
|
||||
default:
|
||||
// Since Color.RGBA returns an alpha-premultiplied color, we should have r <= a && g <= a && b <= a.
|
||||
r = (r * 0xffff) / a
|
||||
g = (g * 0xffff) / a
|
||||
b = (b * 0xffff) / a
|
||||
return NRGBColor{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)}
|
||||
return Color{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)}
|
||||
}
|
||||
}
|
||||
|
||||
var NRGBModel color.Model = color.ModelFunc(nrgbModel)
|
||||
var Model color.Model = color.ModelFunc(nrgbModel)
|
||||
|
||||
func (p *NRGB) ColorModel() color.Model { return NRGBModel }
|
||||
func (p *Image) ColorModel() color.Model { return Model }
|
||||
|
||||
func (p *NRGB) Bounds() image.Rectangle { return p.Rect }
|
||||
func (p *Image) Bounds() image.Rectangle { return p.Rect }
|
||||
|
||||
func (p *NRGB) At(x, y int) color.Color {
|
||||
func (p *Image) At(x, y int) color.Color {
|
||||
return p.NRGBAt(x, y)
|
||||
}
|
||||
|
||||
func (p *NRGB) NRGBAt(x, y int) NRGBColor {
|
||||
func (p *Image) NRGBAt(x, y int) Color {
|
||||
if !(image.Point{x, y}.In(p.Rect)) {
|
||||
return NRGBColor{}
|
||||
return Color{}
|
||||
}
|
||||
i := p.PixOffset(x, y)
|
||||
s := p.Pix[i : i+3 : i+3] // Small cap improves performance, see https://golang.org/issue/27857
|
||||
return NRGBColor{s[0], s[1], s[2]}
|
||||
return Color{s[0], s[1], s[2]}
|
||||
}
|
||||
|
||||
// PixOffset returns the index of the first element of Pix that corresponds to
|
||||
// the pixel at (x, y).
|
||||
func (p *NRGB) PixOffset(x, y int) int {
|
||||
func (p *Image) PixOffset(x, y int) int {
|
||||
return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
|
||||
}
|
||||
|
||||
func (p *NRGB) Set(x, y int, c color.Color) {
|
||||
func (p *Image) Set(x, y int, c color.Color) {
|
||||
if !(image.Point{x, y}.In(p.Rect)) {
|
||||
return
|
||||
}
|
||||
i := p.PixOffset(x, y)
|
||||
c1 := NRGBModel.Convert(c).(NRGBColor)
|
||||
s := p.Pix[i : i+3 : i+3] // Small cap improves performance, see https://golang.org/issue/27857
|
||||
s[0] = c1.R
|
||||
s[1] = c1.G
|
||||
s[2] = c1.B
|
||||
q := nrgbModel(c).(Color)
|
||||
s[0], s[1], s[2] = q.R, q.G, q.B
|
||||
}
|
||||
|
||||
func (p *NRGB) SetRGBA64(x, y int, c color.RGBA64) {
|
||||
func (p *Image) SetRGBA64(x, y int, c color.RGBA64) {
|
||||
if !(image.Point{x, y}.In(p.Rect)) {
|
||||
return
|
||||
}
|
||||
@@ -115,7 +116,7 @@ func (p *NRGB) SetRGBA64(x, y int, c color.RGBA64) {
|
||||
s[2] = uint8(b >> 8)
|
||||
}
|
||||
|
||||
func (p *NRGB) SetNRGBA(x, y int, c color.NRGBA) {
|
||||
func (p *Image) SetNRGBA(x, y int, c color.NRGBA) {
|
||||
if !(image.Point{x, y}.In(p.Rect)) {
|
||||
return
|
||||
}
|
||||
@@ -128,16 +129,16 @@ func (p *NRGB) SetNRGBA(x, y int, c color.NRGBA) {
|
||||
|
||||
// SubImage returns an image representing the portion of the image p visible
|
||||
// through r. The returned value shares pixels with the original image.
|
||||
func (p *NRGB) SubImage(r image.Rectangle) image.Image {
|
||||
func (p *Image) SubImage(r image.Rectangle) image.Image {
|
||||
r = r.Intersect(p.Rect)
|
||||
// If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
|
||||
// either r1 or r2 if the intersection is empty. Without explicitly checking for
|
||||
// this, the Pix[i:] expression below can panic.
|
||||
if r.Empty() {
|
||||
return &NRGB{}
|
||||
return &Image{}
|
||||
}
|
||||
i := p.PixOffset(r.Min.X, r.Min.Y)
|
||||
return &NRGB{
|
||||
return &Image{
|
||||
Pix: p.Pix[i:],
|
||||
Stride: p.Stride,
|
||||
Rect: r,
|
||||
@@ -145,19 +146,20 @@ func (p *NRGB) SubImage(r image.Rectangle) image.Image {
|
||||
}
|
||||
|
||||
// Opaque scans the entire image and reports whether it is fully opaque.
|
||||
func (p *NRGB) Opaque() bool { return true }
|
||||
func (p *Image) Opaque() bool { return true }
|
||||
|
||||
type scanner_rgb struct {
|
||||
image image.Image
|
||||
w, h int
|
||||
palette []NRGBColor
|
||||
palette []Color
|
||||
opaque_base []float64
|
||||
opaque_base_uint []uint8
|
||||
}
|
||||
|
||||
func (s scanner_rgb) Bytes_per_channel() int { return 1 }
|
||||
func (s scanner_rgb) Num_of_channels() int { return 3 }
|
||||
func (s scanner_rgb) Bounds() image.Rectangle { return s.image.Bounds() }
|
||||
func (s scanner_rgb) Bytes_per_channel() int { return 1 }
|
||||
func (s scanner_rgb) Num_of_channels() int { return 3 }
|
||||
func (s scanner_rgb) Bounds() image.Rectangle { return s.image.Bounds() }
|
||||
func (s scanner_rgb) NewImage(r image.Rectangle) image.Image { return NewNRGB(r) }
|
||||
|
||||
func blend(dest []uint8, base []float64, r, g, b, a uint8) {
|
||||
alpha := float64(a) / 255.0
|
||||
@@ -166,14 +168,45 @@ func blend(dest []uint8, base []float64, r, g, b, a uint8) {
|
||||
dest[2] = uint8(alpha*float64(b) + (1.0-alpha)*base[2])
|
||||
}
|
||||
|
||||
func newScannerRGB(img image.Image, opaque_base NRGBColor) *scanner_rgb {
|
||||
func reverse3(pix []uint8) {
|
||||
if len(pix) <= 3 {
|
||||
return
|
||||
}
|
||||
i := 0
|
||||
j := len(pix) - 3
|
||||
for i < j {
|
||||
pi := pix[i : i+3 : i+3]
|
||||
pj := pix[j : j+3 : j+3]
|
||||
pi[0], pj[0] = pj[0], pi[0]
|
||||
pi[1], pj[1] = pj[1], pi[1]
|
||||
pi[2], pj[2] = pj[2], pi[2]
|
||||
i += 3
|
||||
j -= 3
|
||||
}
|
||||
}
|
||||
|
||||
func (s *scanner_rgb) ReverseRow(img image.Image, row int) {
|
||||
d := img.(*Image)
|
||||
pos := row * d.Stride
|
||||
r := d.Pix[pos : pos+d.Stride : pos+d.Stride]
|
||||
reverse3(r)
|
||||
}
|
||||
|
||||
func (s *scanner_rgb) ScanRow(x1, y1, x2, y2 int, img image.Image, row int) {
|
||||
d := img.(*Image)
|
||||
pos := row * d.Stride
|
||||
r := d.Pix[pos : pos+d.Stride : pos+d.Stride]
|
||||
s.Scan(x1, y1, x2, y2, r)
|
||||
}
|
||||
|
||||
func newScannerRGB(img image.Image, opaque_base Color) *scanner_rgb {
|
||||
s := &scanner_rgb{
|
||||
image: img, w: img.Bounds().Dx(), h: img.Bounds().Dy(),
|
||||
opaque_base: []float64{float64(opaque_base.R), float64(opaque_base.G), float64(opaque_base.B)}[0:3:3],
|
||||
opaque_base_uint: []uint8{opaque_base.R, opaque_base.G, opaque_base.B}[0:3:3],
|
||||
}
|
||||
if img, ok := img.(*image.Paletted); ok {
|
||||
s.palette = make([]NRGBColor, max(256, len(img.Palette)))
|
||||
s.palette = make([]Color, max(256, len(img.Palette)))
|
||||
d := [3]uint8{0, 0, 0}
|
||||
ds := d[:]
|
||||
for i := 0; i < len(img.Palette); i++ {
|
||||
@@ -182,18 +215,31 @@ func newScannerRGB(img image.Image, opaque_base NRGBColor) *scanner_rgb {
|
||||
case 0:
|
||||
s.palette[i] = opaque_base
|
||||
case 0xffff:
|
||||
s.palette[i] = NRGBColor{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8)}
|
||||
s.palette[i] = Color{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8)}
|
||||
default:
|
||||
blend(ds, s.opaque_base, uint8((r*0xffff/a)>>8), uint8((g*0xffff/a)>>8), uint8((b*0xffff/a)>>8), uint8(a>>8))
|
||||
s.palette[i] = NRGBColor{R: d[0], G: d[1], B: d[2]}
|
||||
s.palette[i] = Color{R: d[0], G: d[1], B: d[2]}
|
||||
}
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scanner_rgb) blend8(d []uint8, a uint8) {
|
||||
switch a {
|
||||
case 0:
|
||||
d[0] = s.opaque_base_uint[0]
|
||||
d[1] = s.opaque_base_uint[1]
|
||||
d[2] = s.opaque_base_uint[2]
|
||||
case 0xff:
|
||||
default:
|
||||
blend(d, s.opaque_base, d[0], d[1], d[2], a)
|
||||
}
|
||||
}
|
||||
|
||||
// scan scans the given rectangular region of the image into dst.
|
||||
func (s *scanner_rgb) Scan(x1, y1, x2, y2 int, dst []uint8) {
|
||||
_ = dst[3*(x2-x1)*(y2-y1)-1]
|
||||
switch img := s.image.(type) {
|
||||
case *image.NRGBA:
|
||||
j := 0
|
||||
@@ -306,68 +352,108 @@ func (s *scanner_rgb) Scan(x1, y1, x2, y2 int, dst []uint8) {
|
||||
}
|
||||
|
||||
case *image.YCbCr:
|
||||
j := 0
|
||||
x1 += img.Rect.Min.X
|
||||
x2 += img.Rect.Min.X
|
||||
y1 += img.Rect.Min.Y
|
||||
y2 += img.Rect.Min.Y
|
||||
|
||||
hy := img.Rect.Min.Y / 2
|
||||
hx := img.Rect.Min.X / 2
|
||||
for y := y1; y < y2; y++ {
|
||||
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
|
||||
|
||||
var yBase int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio422:
|
||||
yBase = (y - img.Rect.Min.Y) * img.CStride
|
||||
case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
|
||||
yBase = (y/2 - hy) * img.CStride
|
||||
if img.SubsampleRatio == image.YCbCrSubsampleRatio444 {
|
||||
Y := img.Y[y1*img.YStride:]
|
||||
Cb := img.Cb[y1*img.CStride:]
|
||||
Cr := img.Cr[y1*img.CStride:]
|
||||
for range y2 - y1 {
|
||||
for x := x1; x < x2; x++ {
|
||||
d := dst[0:3:3]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(Y[x], Cb[x], Cr[x])
|
||||
dst = dst[3:]
|
||||
}
|
||||
Y, Cb, Cr = Y[img.YStride:], Cb[img.CStride:], Cr[img.CStride:]
|
||||
}
|
||||
} else {
|
||||
j := 0
|
||||
x1 += img.Rect.Min.X
|
||||
x2 += img.Rect.Min.X
|
||||
y1 += img.Rect.Min.Y
|
||||
y2 += img.Rect.Min.Y
|
||||
|
||||
for x := x1; x < x2; x++ {
|
||||
var ic int
|
||||
hy := img.Rect.Min.Y / 2
|
||||
hx := img.Rect.Min.X / 2
|
||||
for y := y1; y < y2; y++ {
|
||||
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
|
||||
|
||||
var yBase int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio440:
|
||||
ic = yBase + (x - img.Rect.Min.X)
|
||||
case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
|
||||
ic = yBase + (x/2 - hx)
|
||||
default:
|
||||
ic = img.COffset(x, y)
|
||||
case image.YCbCrSubsampleRatio422:
|
||||
yBase = (y - img.Rect.Min.Y) * img.CStride
|
||||
case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
|
||||
yBase = (y/2 - hy) * img.CStride
|
||||
}
|
||||
|
||||
yy1 := int32(img.Y[iy]) * 0x10101
|
||||
cb1 := int32(img.Cb[ic]) - 128
|
||||
cr1 := int32(img.Cr[ic]) - 128
|
||||
for x := x1; x < x2; x++ {
|
||||
var ic int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio440:
|
||||
ic = yBase + (x - img.Rect.Min.X)
|
||||
case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
|
||||
ic = yBase + (x/2 - hx)
|
||||
default:
|
||||
ic = img.COffset(x, y)
|
||||
}
|
||||
d := dst[j : j+3 : j+3]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(img.Y[iy], img.Cb[ic], img.Cr[ic])
|
||||
iy++
|
||||
j += 3
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.NYCbCrA:
|
||||
if img.SubsampleRatio == image.YCbCrSubsampleRatio444 {
|
||||
Y := img.Y[y1*img.YStride:]
|
||||
A := img.A[y1*img.AStride:]
|
||||
Cb := img.Cb[y1*img.CStride:]
|
||||
Cr := img.Cr[y1*img.CStride:]
|
||||
for range y2 - y1 {
|
||||
for x := x1; x < x2; x++ {
|
||||
d := dst[0:3:3]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(Y[x], Cb[x], Cr[x])
|
||||
s.blend8(d, A[x])
|
||||
dst = dst[3:]
|
||||
}
|
||||
Y, Cb, Cr = Y[img.YStride:], Cb[img.CStride:], Cr[img.CStride:]
|
||||
A = A[img.AStride:]
|
||||
}
|
||||
} else {
|
||||
j := 0
|
||||
x1 += img.Rect.Min.X
|
||||
x2 += img.Rect.Min.X
|
||||
y1 += img.Rect.Min.Y
|
||||
y2 += img.Rect.Min.Y
|
||||
|
||||
r := yy1 + 91881*cr1
|
||||
if uint32(r)&0xff000000 == 0 {
|
||||
r >>= 16
|
||||
} else {
|
||||
r = ^(r >> 31)
|
||||
hy := img.Rect.Min.Y / 2
|
||||
hx := img.Rect.Min.X / 2
|
||||
for y := y1; y < y2; y++ {
|
||||
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
|
||||
ia := (y-img.Rect.Min.Y)*img.AStride + (x1 - img.Rect.Min.X)
|
||||
|
||||
var yBase int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio422:
|
||||
yBase = (y - img.Rect.Min.Y) * img.CStride
|
||||
case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
|
||||
yBase = (y/2 - hy) * img.CStride
|
||||
}
|
||||
|
||||
g := yy1 - 22554*cb1 - 46802*cr1
|
||||
if uint32(g)&0xff000000 == 0 {
|
||||
g >>= 16
|
||||
} else {
|
||||
g = ^(g >> 31)
|
||||
for x := x1; x < x2; x++ {
|
||||
var ic int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio440:
|
||||
ic = yBase + (x - img.Rect.Min.X)
|
||||
case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
|
||||
ic = yBase + (x/2 - hx)
|
||||
default:
|
||||
ic = img.COffset(x, y)
|
||||
}
|
||||
d := dst[j : j+3 : j+3]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(img.Y[iy], img.Cb[ic], img.Cr[ic])
|
||||
s.blend8(d, img.A[ia])
|
||||
iy++
|
||||
j += 3
|
||||
}
|
||||
|
||||
b := yy1 + 116130*cb1
|
||||
if uint32(b)&0xff000000 == 0 {
|
||||
b >>= 16
|
||||
} else {
|
||||
b = ^(b >> 31)
|
||||
}
|
||||
|
||||
d := dst[j : j+3 : j+3]
|
||||
d[0] = uint8(r)
|
||||
d[1] = uint8(g)
|
||||
d[2] = uint8(b)
|
||||
|
||||
iy++
|
||||
j += 3
|
||||
}
|
||||
}
|
||||
|
||||
@@ -415,26 +501,26 @@ func (s *scanner_rgb) Scan(x1, y1, x2, y2 int, dst []uint8) {
|
||||
}
|
||||
}
|
||||
|
||||
func NewNRGB(r image.Rectangle) *NRGB {
|
||||
return &NRGB{
|
||||
func NewNRGB(r image.Rectangle) *Image {
|
||||
return &Image{
|
||||
Pix: make([]uint8, 3*r.Dx()*r.Dy()),
|
||||
Stride: 3 * r.Dx(),
|
||||
Rect: r,
|
||||
}
|
||||
}
|
||||
|
||||
func NewNRGBWithContiguousRGBPixels(p []byte, left, top, width, height int) (*NRGB, error) {
|
||||
func NewNRGBWithContiguousRGBPixels(p []byte, left, top, width, height int) (*Image, error) {
|
||||
const bpp = 3
|
||||
if expected := bpp * width * height; expected != len(p) {
|
||||
return nil, fmt.Errorf("the image width and height dont match the size of the specified pixel data: width=%d height=%d sz=%d != %d", width, height, len(p), expected)
|
||||
}
|
||||
return &NRGB{
|
||||
return &Image{
|
||||
Pix: p,
|
||||
Stride: bpp * width,
|
||||
Rect: image.Rectangle{image.Point{left, top}, image.Point{left + width, top + height}},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewNRGBScanner(source_image image.Image, opaque_base NRGBColor) Scanner {
|
||||
func NewNRGBScanner(source_image image.Image, opaque_base Color) types.Scanner {
|
||||
return newScannerRGB(source_image, opaque_base)
|
||||
}
|
||||
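A small usage sketch of the renamed nrgb.Image type: pixels are packed as three bytes per pixel, so PixOffset advances by 3 per column and by Stride per row. The import path is assumed from the vendor directory layout.
package main

import (
	"fmt"
	"image"

	"github.com/kovidgoyal/imaging/nrgb" // path assumed from the vendor layout
)

func main() {
	img := nrgb.NewNRGB(image.Rect(0, 0, 2, 2)) // 2x2 image, Stride == 6
	img.Set(1, 0, nrgb.Color{R: 255})           // opaque red, alpha is implicit
	fmt.Println(img.PixOffset(1, 0))            // 3: second pixel of the first row
	fmt.Println(img.NRGBAt(1, 0).AsSharp())     // #FF0000
}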
@@ -1,8 +1,11 @@
|
||||
package imaging
|
||||
package nrgba
|
||||
|
||||
import (
|
||||
"image"
|
||||
"image/color"
|
||||
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
type scanner struct {
|
||||
@@ -14,6 +17,9 @@ type scanner struct {
|
||||
func (s scanner) Bytes_per_channel() int { return 1 }
|
||||
func (s scanner) Num_of_channels() int { return 4 }
|
||||
func (s scanner) Bounds() image.Rectangle { return s.image.Bounds() }
|
||||
func (s scanner) NewImage(r image.Rectangle) image.Image {
|
||||
return image.NewNRGBA(r)
|
||||
}
|
||||
|
||||
func newScanner(img image.Image) *scanner {
|
||||
s := &scanner{
|
||||
@@ -30,12 +36,45 @@ func newScanner(img image.Image) *scanner {
|
||||
return s
|
||||
}
|
||||
|
||||
func reverse4(pix []uint8) {
|
||||
if len(pix) <= 4 {
|
||||
return
|
||||
}
|
||||
i := 0
|
||||
j := len(pix) - 4
|
||||
for i < j {
|
||||
pi := pix[i : i+4 : i+4]
|
||||
pj := pix[j : j+4 : j+4]
|
||||
pi[0], pj[0] = pj[0], pi[0]
|
||||
pi[1], pj[1] = pj[1], pi[1]
|
||||
pi[2], pj[2] = pj[2], pi[2]
|
||||
pi[3], pj[3] = pj[3], pi[3]
|
||||
i += 4
|
||||
j -= 4
|
||||
}
|
||||
}
|
||||
|
||||
func (s *scanner) ReverseRow(img image.Image, row int) {
|
||||
d := img.(*image.NRGBA)
|
||||
pos := row * d.Stride
|
||||
r := d.Pix[pos : pos+d.Stride : pos+d.Stride]
|
||||
reverse4(r)
|
||||
}
|
||||
|
||||
func (s *scanner) ScanRow(x1, y1, x2, y2 int, img image.Image, row int) {
|
||||
d := img.(*image.NRGBA)
|
||||
pos := row * d.Stride
|
||||
r := d.Pix[pos : pos+d.Stride : pos+d.Stride]
|
||||
s.Scan(x1, y1, x2, y2, r)
|
||||
}
|
||||
|
||||
// scan scans the given rectangular region of the image into dst.
|
||||
func (s *scanner) Scan(x1, y1, x2, y2 int, dst []uint8) {
|
||||
_ = dst[4*(x2-x1)*(y2-y1)-1]
|
||||
switch img := s.image.(type) {
|
||||
case *NRGB:
|
||||
j := 0
|
||||
case *nrgb.Image:
|
||||
if x2 == x1+1 {
|
||||
j := 0
|
||||
i := y1*img.Stride + x1*3
|
||||
for y := y1; y < y2; y++ {
|
||||
d := dst[j : j+4 : j+4]
|
||||
@@ -196,69 +235,115 @@ func (s *scanner) Scan(x1, y1, x2, y2 int, dst []uint8) {
|
||||
}
|
||||
|
||||
case *image.YCbCr:
|
||||
j := 0
|
||||
x1 += img.Rect.Min.X
|
||||
x2 += img.Rect.Min.X
|
||||
y1 += img.Rect.Min.Y
|
||||
y2 += img.Rect.Min.Y
|
||||
|
||||
hy := img.Rect.Min.Y / 2
|
||||
hx := img.Rect.Min.X / 2
|
||||
for y := y1; y < y2; y++ {
|
||||
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
|
||||
|
||||
var yBase int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio422:
|
||||
yBase = (y - img.Rect.Min.Y) * img.CStride
|
||||
case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
|
||||
yBase = (y/2 - hy) * img.CStride
|
||||
if img.SubsampleRatio == image.YCbCrSubsampleRatio444 {
|
||||
Y := img.Y[y1*img.YStride:]
|
||||
Cb := img.Cb[y1*img.CStride:]
|
||||
Cr := img.Cr[y1*img.CStride:]
|
||||
for range y2 - y1 {
|
||||
for x := x1; x < x2; x++ {
|
||||
d := dst[0:4:4]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(Y[x], Cb[x], Cr[x])
|
||||
d[3] = 255
|
||||
dst = dst[4:]
|
||||
}
|
||||
Y, Cb, Cr = Y[img.YStride:], Cb[img.CStride:], Cr[img.CStride:]
|
||||
}
|
||||
} else {
|
||||
|
||||
for x := x1; x < x2; x++ {
|
||||
var ic int
|
||||
j := 0
|
||||
x1 += img.Rect.Min.X
|
||||
x2 += img.Rect.Min.X
|
||||
y1 += img.Rect.Min.Y
|
||||
y2 += img.Rect.Min.Y
|
||||
|
||||
hy := img.Rect.Min.Y / 2
|
||||
hx := img.Rect.Min.X / 2
|
||||
for y := y1; y < y2; y++ {
|
||||
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
|
||||
|
||||
var yBase int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio440:
|
||||
ic = yBase + (x - img.Rect.Min.X)
|
||||
case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
|
||||
ic = yBase + (x/2 - hx)
|
||||
default:
|
||||
ic = img.COffset(x, y)
|
||||
case image.YCbCrSubsampleRatio422:
|
||||
yBase = (y - img.Rect.Min.Y) * img.CStride
|
||||
case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
|
||||
yBase = (y/2 - hy) * img.CStride
|
||||
}
|
||||
|
||||
yy1 := int32(img.Y[iy]) * 0x10101
|
||||
cb1 := int32(img.Cb[ic]) - 128
|
||||
cr1 := int32(img.Cr[ic]) - 128
|
||||
for x := x1; x < x2; x++ {
|
||||
var ic int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio440:
|
||||
ic = yBase + (x - img.Rect.Min.X)
|
||||
case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
|
||||
ic = yBase + (x/2 - hx)
|
||||
default:
|
||||
ic = img.COffset(x, y)
|
||||
}
|
||||
|
||||
r := yy1 + 91881*cr1
|
||||
if uint32(r)&0xff000000 == 0 {
|
||||
r >>= 16
|
||||
} else {
|
||||
r = ^(r >> 31)
|
||||
d := dst[j : j+4 : j+4]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(img.Y[iy], img.Cb[ic], img.Cr[ic])
|
||||
d[3] = 0xff
|
||||
|
||||
iy++
|
||||
j += 4
|
||||
}
|
||||
}
|
||||
}
|
||||
case *image.NYCbCrA:
|
||||
if img.SubsampleRatio == image.YCbCrSubsampleRatio444 {
|
||||
Y := img.Y[y1*img.YStride:]
|
||||
A := img.A[y1*img.AStride:]
|
||||
Cb := img.Cb[y1*img.CStride:]
|
||||
Cr := img.Cr[y1*img.CStride:]
|
||||
for range y2 - y1 {
|
||||
for x := x1; x < x2; x++ {
|
||||
d := dst[0:4:4]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(Y[x], Cb[x], Cr[x])
|
||||
d[3] = A[x]
|
||||
dst = dst[4:]
|
||||
}
|
||||
Y, Cb, Cr = Y[img.YStride:], Cb[img.CStride:], Cr[img.CStride:]
|
||||
A = A[img.AStride:]
|
||||
}
|
||||
} else {
|
||||
j := 0
|
||||
x1 += img.Rect.Min.X
|
||||
x2 += img.Rect.Min.X
|
||||
y1 += img.Rect.Min.Y
|
||||
y2 += img.Rect.Min.Y
|
||||
|
||||
hy := img.Rect.Min.Y / 2
|
||||
hx := img.Rect.Min.X / 2
|
||||
for y := y1; y < y2; y++ {
|
||||
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
|
||||
ia := (y-img.Rect.Min.Y)*img.AStride + (x1 - img.Rect.Min.X)
|
||||
|
||||
var yBase int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio422:
|
||||
yBase = (y - img.Rect.Min.Y) * img.CStride
|
||||
case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
|
||||
yBase = (y/2 - hy) * img.CStride
|
||||
}
|
||||
|
||||
g := yy1 - 22554*cb1 - 46802*cr1
|
||||
if uint32(g)&0xff000000 == 0 {
|
||||
g >>= 16
|
||||
} else {
|
||||
g = ^(g >> 31)
|
||||
for x := x1; x < x2; x++ {
|
||||
var ic int
|
||||
switch img.SubsampleRatio {
|
||||
case image.YCbCrSubsampleRatio440:
|
||||
ic = yBase + (x - img.Rect.Min.X)
|
||||
case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
|
||||
ic = yBase + (x/2 - hx)
|
||||
default:
|
||||
ic = img.COffset(x, y)
|
||||
}
|
||||
|
||||
d := dst[j : j+4 : j+4]
|
||||
d[0], d[1], d[2] = color.YCbCrToRGB(img.Y[iy], img.Cb[ic], img.Cr[ic])
|
||||
d[3] = img.A[ia]
|
||||
|
||||
iy++
|
||||
j += 4
|
||||
}
|
||||
|
||||
b := yy1 + 116130*cb1
|
||||
if uint32(b)&0xff000000 == 0 {
|
||||
b >>= 16
|
||||
} else {
|
||||
b = ^(b >> 31)
|
||||
}
|
||||
|
||||
d := dst[j : j+4 : j+4]
|
||||
d[0] = uint8(r)
|
||||
d[1] = uint8(g)
|
||||
d[2] = uint8(b)
|
||||
d[3] = 0xff
|
||||
|
||||
iy++
|
||||
j += 4
|
||||
}
|
||||
}
|
||||
|
||||
@@ -312,13 +397,6 @@ func (s *scanner) Scan(x1, y1, x2, y2 int, dst []uint8) {
|
||||
}
|
||||
}
|
||||
|
||||
type Scanner interface {
|
||||
Scan(x1, y1, x2, y2 int, dst []uint8)
|
||||
Bytes_per_channel() int
|
||||
Num_of_channels() int
|
||||
Bounds() image.Rectangle
|
||||
}
|
||||
|
||||
func NewNRGBAScanner(source_image image.Image) Scanner {
|
||||
func NewNRGBAScanner(source_image image.Image) types.Scanner {
|
||||
return newScanner(source_image)
|
||||
}
|
||||
155
vendor/github.com/kovidgoyal/imaging/opaque.go
generated
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
package imaging
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"image"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kovidgoyal/go-parallel"
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func is_opaque1(pix []uint8, w, h, stride int) bool {
|
||||
is_opaque := atomic.Bool{}
|
||||
is_opaque.Store(true)
|
||||
if err := parallel.Run_in_parallel_to_first_result(0, func(start, limit int, keep_going *atomic.Bool) bool {
|
||||
pix := pix[stride*start:]
|
||||
for range limit - start {
|
||||
p := pix[0:w:w]
|
||||
for _, x := range p {
|
||||
if x != 0xff {
|
||||
is_opaque.Store(false)
|
||||
return true
|
||||
}
|
||||
}
|
||||
if !keep_going.Load() {
|
||||
return false
|
||||
}
|
||||
pix = pix[stride:]
|
||||
}
|
||||
return false
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return is_opaque.Load()
|
||||
}
|
||||
|
||||
func is_opaque8(pix []uint8, w, h, stride int) bool {
|
||||
is_opaque := atomic.Bool{}
|
||||
is_opaque.Store(true)
|
||||
if err := parallel.Run_in_parallel_to_first_result(0, func(start, limit int, keep_going *atomic.Bool) bool {
|
||||
pix := pix[stride*start:]
|
||||
for range limit - start {
|
||||
p := pix[0 : 4*w : 4*w]
|
||||
for range w {
|
||||
if p[3] != 0xff {
|
||||
is_opaque.Store(false)
|
||||
return true
|
||||
}
|
||||
p = p[4:]
|
||||
}
|
||||
if !keep_going.Load() {
|
||||
return false
|
||||
}
|
||||
pix = pix[stride:]
|
||||
}
|
||||
return false
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return is_opaque.Load()
|
||||
}
|
||||
|
||||
func is_opaque16(pix []uint8, w, h, stride int) bool {
|
||||
is_opaque := atomic.Bool{}
|
||||
is_opaque.Store(true)
|
||||
if err := parallel.Run_in_parallel_to_first_result(0, func(start, limit int, keep_going *atomic.Bool) bool {
|
||||
pix := pix[stride*start:]
|
||||
for range limit - start {
|
||||
p := pix[0 : 8*w : 8*w]
|
||||
for range w {
|
||||
if p[6] != 0xff || p[7] != 0xff {
|
||||
is_opaque.Store(false)
|
||||
return true
|
||||
}
|
||||
p = p[8:]
|
||||
}
|
||||
if !keep_going.Load() {
|
||||
return false
|
||||
}
|
||||
pix = pix[stride:]
|
||||
}
|
||||
return false
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return is_opaque.Load()
|
||||
}
|
||||
|
||||
func IsOpaqueType(img image.Image) (ans bool) {
|
||||
switch img.(type) {
|
||||
case *nrgb.Image, *image.CMYK, *image.YCbCr, *image.Gray, *image.Gray16:
|
||||
return true
|
||||
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func IsOpaque(img image.Image) (ans bool) {
|
||||
type is_opaque interface{ Opaque() bool }
|
||||
if img.Bounds().Empty() {
|
||||
return true
|
||||
}
|
||||
switch img := img.(type) {
|
||||
case *nrgb.Image, *image.CMYK, *image.YCbCr, *image.Gray, *image.Gray16:
|
||||
return true
|
||||
case *image.NRGBA:
|
||||
return is_opaque8(img.Pix, img.Bounds().Dx(), img.Bounds().Dy(), img.Stride)
|
||||
case *image.RGBA:
|
||||
return is_opaque8(img.Pix, img.Bounds().Dx(), img.Bounds().Dy(), img.Stride)
|
||||
case *image.NRGBA64:
|
||||
return is_opaque16(img.Pix, img.Bounds().Dx(), img.Bounds().Dy(), img.Stride)
|
||||
case *image.RGBA64:
|
||||
return is_opaque16(img.Pix, img.Bounds().Dx(), img.Bounds().Dy(), img.Stride)
|
||||
case *image.NYCbCrA:
|
||||
return is_opaque1(img.A, img.Bounds().Dx(), img.Bounds().Dy(), img.AStride)
|
||||
case *image.Paletted:
|
||||
bad_colors := make([]uint8, 0, len(img.Palette))
|
||||
for i, c := range img.Palette {
|
||||
_, _, _, a := c.RGBA()
|
||||
if a != 0xffff {
|
||||
bad_colors = append(bad_colors, uint8(i))
|
||||
}
|
||||
}
|
||||
switch len(bad_colors) {
|
||||
case 0:
|
||||
return true
|
||||
case len(img.Palette):
|
||||
return false
|
||||
case 1:
|
||||
return bytes.IndexByte(img.Pix, bad_colors[0]) < 0
|
||||
default:
|
||||
is_opaque := atomic.Bool{}
|
||||
is_opaque.Store(true)
|
||||
if err := parallel.Run_in_parallel_to_first_result(0, func(start, limit int, keep_going *atomic.Bool) bool {
|
||||
for i := start; i < limit && keep_going.Load(); i++ {
|
||||
if bytes.IndexByte(img.Pix, bad_colors[i]) > -1 {
|
||||
is_opaque.Store(false)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}, 0, len(bad_colors)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return is_opaque.Load()
|
||||
}
|
||||
case is_opaque:
|
||||
return img.Opaque()
|
||||
}
|
||||
return false
|
||||
}
|
||||
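A hedged sketch of IsOpaque from the new opaque.go: a freshly allocated NRGBA image has zero alpha everywhere, so the parallel scan reports it as not opaque until every pixel's alpha is set to 0xff. The import path is assumed from the vendor directory.
package main

import (
	"fmt"
	"image"
	"image/color"

	"github.com/kovidgoyal/imaging" // path assumed from the vendor layout
)

func main() {
	img := image.NewNRGBA(image.Rect(0, 0, 4, 4)) // zero value: alpha 0 everywhere
	fmt.Println(imaging.IsOpaque(img))            // false
	for y := 0; y < 4; y++ {
		for x := 0; x < 4; x++ {
			img.Set(x, y, color.NRGBA{R: 10, A: 0xff})
		}
	}
	fmt.Println(imaging.IsOpaque(img)) // true
}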
29
vendor/github.com/kovidgoyal/imaging/prism/meta/autometa/autometa.go
generated
vendored
@@ -1,16 +1,27 @@
|
||||
package autometa
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/gifmeta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/jpegmeta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/netpbmmeta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/pngmeta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/tiffmeta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/webpmeta"
|
||||
"github.com/kovidgoyal/imaging/streams"
|
||||
)
|
||||
|
||||
var loaders = []func(io.Reader) (*meta.Data, error){
|
||||
jpegmeta.ExtractMetadata,
|
||||
pngmeta.ExtractMetadata,
|
||||
gifmeta.ExtractMetadata,
|
||||
webpmeta.ExtractMetadata,
|
||||
tiffmeta.ExtractMetadata,
|
||||
netpbmmeta.ExtractMetadata,
|
||||
}
|
||||
|
||||
// Load loads the metadata for an image stream, which may be one of the
|
||||
// supported image formats.
|
||||
//
|
||||
@@ -20,22 +31,20 @@ import (
|
||||
// stream. This provides a convenient way to load the full image after loading
|
||||
// the metadata.
|
||||
//
|
||||
// An error is returned if basic metadata could not be extracted. The returned
|
||||
// stream still provides the full image data.
|
||||
// Returns nil if no image format was recognized. Returns an error if there
|
||||
// was an error decoding metadata.
|
||||
func Load(r io.Reader) (md *meta.Data, imgStream io.Reader, err error) {
|
||||
loaders := []func(io.Reader) (*meta.Data, error){
|
||||
pngmeta.ExtractMetadata,
|
||||
jpegmeta.ExtractMetadata,
|
||||
webpmeta.ExtractMetadata,
|
||||
}
|
||||
for _, loader := range loaders {
|
||||
r, err = streams.CallbackWithSeekable(r, func(r io.Reader) (err error) {
|
||||
md, err = loader(r)
|
||||
return
|
||||
})
|
||||
if err == nil {
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, r, err
|
||||
case md != nil:
|
||||
return md, r, nil
|
||||
}
|
||||
}
|
||||
return nil, r, fmt.Errorf("unrecognised image format")
|
||||
return nil, r, nil
|
||||
}
|
||||
|
||||
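A minimal sketch of the updated autometa.Load contract: a nil *meta.Data with a nil error now means the format was simply not recognized, and the returned reader still yields the full image bytes. The file name is hypothetical.
package main

import (
	"fmt"
	"image"
	"os"

	"github.com/kovidgoyal/imaging/prism/meta/autometa"
)

func main() {
	f, err := os.Open("photo.jpg") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	md, stream, err := autometa.Load(f)
	if err != nil {
		panic(err) // a format was recognized but its metadata could not be decoded
	}
	if md == nil {
		fmt.Println("format not recognized by any metadata loader")
	} else {
		fmt.Println(md.Format, md.PixelWidth, md.PixelHeight)
	}
	if img, _, err := image.Decode(stream); err == nil { // stream replays the probed bytes
		fmt.Println(img.Bounds())
	}
}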
BIN
vendor/github.com/kovidgoyal/imaging/prism/meta/autometa/orientation_2.tiff
generated
vendored
Normal file
Binary file not shown.
452
vendor/github.com/kovidgoyal/imaging/prism/meta/cicp.go
generated
vendored
Normal file
@@ -0,0 +1,452 @@
|
||||
package meta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta/icc"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type CodingIndependentCodePoints struct {
|
||||
ColorPrimaries, TransferCharacteristics, MatrixCoefficients, VideoFullRange uint8
|
||||
IsSet bool
|
||||
}
|
||||
|
||||
var SRGB = CodingIndependentCodePoints{1, 13, 0, 1, true}
|
||||
var DISPLAY_P3 = CodingIndependentCodePoints{12, 13, 0, 1, true}
|
||||
|
||||
func (c CodingIndependentCodePoints) String() string {
|
||||
return fmt.Sprintf("CodingIndependentCodePoints{ColorPrimaries: %d, TransferCharacteristics: %d, MatrixCoefficients: %d, VideoFullRange: %d}", c.ColorPrimaries, c.TransferCharacteristics, c.MatrixCoefficients, c.VideoFullRange)
|
||||
}
|
||||
|
||||
func (c CodingIndependentCodePoints) IsSRGB() bool {
|
||||
return c == SRGB
|
||||
}
|
||||
|
||||
func (c CodingIndependentCodePoints) VideoFullRangeIsValid() bool {
|
||||
return c.VideoFullRange == 0 || c.VideoFullRange == 1
|
||||
}
|
||||
|
||||
// See https://www.w3.org/TR/png-3/#cICP-chunk for why we do this
|
||||
func extend_over_full_range(f func(float64) float64) func(float64) float64 {
|
||||
return func(x float64) float64 {
|
||||
return math.Copysign(1, x) * f(math.Abs(x))
|
||||
}
|
||||
}
|
||||
|
||||
func (src CodingIndependentCodePoints) PipelineTo(dest CodingIndependentCodePoints) *icc.Pipeline {
|
||||
if src == dest {
|
||||
return nil
|
||||
}
|
||||
if src.MatrixCoefficients != 0 || dest.MatrixCoefficients != 0 {
|
||||
return nil // TODO: Add support for these
|
||||
}
|
||||
if !src.VideoFullRangeIsValid() || !dest.VideoFullRangeIsValid() {
|
||||
return nil
|
||||
}
|
||||
p := primaries[int(src.ColorPrimaries)]
|
||||
if p.Name == "" {
|
||||
return nil
|
||||
}
|
||||
tc := transfer_functions[int(src.TransferCharacteristics)]
|
||||
if tc.Name == "" {
|
||||
return nil
|
||||
}
|
||||
to_linear := icc.NewUniformFunctionTransformer(tc.Name, icc.IfElse(src.VideoFullRange == SRGB.VideoFullRange, tc.EOTF, extend_over_full_range(tc.EOTF)))
|
||||
if tc.Name == "Identity" {
|
||||
to_linear = nil
|
||||
}
|
||||
linear_to_xyz := p.CalculateRGBtoXYZMatrix()
|
||||
p = primaries[int(dest.ColorPrimaries)]
|
||||
if p.Name == "" {
|
||||
return nil
|
||||
}
|
||||
tc = transfer_functions[int(dest.TransferCharacteristics)]
|
||||
if tc.Name == "" {
|
||||
return nil
|
||||
}
|
||||
xyz_to_linear := p.CalculateRGBtoXYZMatrix()
|
||||
xyz_to_linear, err := xyz_to_linear.Inverted()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
f := icc.IfElse(dest.VideoFullRange == SRGB.VideoFullRange, tc.OETF, extend_over_full_range(tc.OETF))
|
||||
from_linear := icc.NewUniformFunctionTransformer(tc.Name, func(x float64) float64 {
|
||||
// TODO: Gamut mapping for white point of dest, re-use code from colorconv
|
||||
return max(0, min(f(x), 1))
|
||||
})
|
||||
if tc.Name == "Identity" {
|
||||
from_linear = nil
|
||||
}
|
||||
ans := &icc.Pipeline{}
|
||||
ans.Append(to_linear, &linear_to_xyz, &xyz_to_linear, from_linear)
|
||||
ans.Finalize(true)
|
||||
return ans
|
||||
}
|
||||
|
||||
func (c CodingIndependentCodePoints) PipelineToSRGB() *icc.Pipeline {
|
||||
return c.PipelineTo(SRGB)
|
||||
}
|
||||
|
||||
// XY holds CIE xy chromaticity coordinates.
|
||||
type XY struct {
|
||||
X, Y float64
|
||||
}
|
||||
|
||||
// ColorSpace defines the primaries and white point of a color space.
|
||||
type Primaries struct {
|
||||
Name string
|
||||
Red XY
|
||||
Green XY
|
||||
Blue XY
|
||||
White XY
|
||||
}
|
||||
|
||||
// xyToXYZ converts xy chromaticity to XYZ coordinates, assuming Y=1.
|
||||
func xyToXYZ(p XY) [3]float64 {
|
||||
if p.Y == 0 {
|
||||
return [3]float64{0, 0, 0}
|
||||
}
|
||||
return [3]float64{
|
||||
p.X / p.Y,
|
||||
1.0,
|
||||
(1.0 - p.X - p.Y) / p.Y,
|
||||
}
|
||||
}
|
||||
|
||||
// CalculateRGBtoXYZMatrix computes the matrix to convert from a linear RGB color space to CIE XYZ.
|
||||
func (cs *Primaries) CalculateRGBtoXYZMatrix() icc.Matrix3 {
|
||||
// Convert primaries to XYZ space (normalized to Y=1)
|
||||
r := xyToXYZ(cs.Red)
|
||||
g := xyToXYZ(cs.Green)
|
||||
b := xyToXYZ(cs.Blue)
|
||||
|
||||
// Form the matrix of primaries
|
||||
M := icc.Matrix3{
|
||||
{r[0], g[0], b[0]},
|
||||
{r[1], g[1], b[1]},
|
||||
{r[2], g[2], b[2]},
|
||||
}
|
||||
|
||||
// Calculate the scaling factors (S_r, S_g, S_b)
|
||||
invM, err := M.Inverted()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
whiteXYZ := xyToXYZ(cs.White)
|
||||
|
||||
s_r := invM[0][0]*whiteXYZ[0] + invM[0][1]*whiteXYZ[1] + invM[0][2]*whiteXYZ[2]
|
||||
s_g := invM[1][0]*whiteXYZ[0] + invM[1][1]*whiteXYZ[1] + invM[1][2]*whiteXYZ[2]
|
||||
s_b := invM[2][0]*whiteXYZ[0] + invM[2][1]*whiteXYZ[1] + invM[2][2]*whiteXYZ[2]
|
||||
|
||||
// Scale the primaries matrix to get the final conversion matrix
|
||||
finalMatrix := icc.Matrix3{
|
||||
{M[0][0] * s_r, M[0][1] * s_g, M[0][2] * s_b},
|
||||
{M[1][0] * s_r, M[1][1] * s_g, M[1][2] * s_b},
|
||||
{M[2][0] * s_r, M[2][1] * s_g, M[2][2] * s_b},
|
||||
}
|
||||
return finalMatrix
|
||||
}
|
||||
|
||||
type WellKnownPrimaries int
|
||||
|
||||
// These come from ITU-T H.273 spec
|
||||
var primaries = map[int]Primaries{
|
||||
1: {
|
||||
Name: "sRGB",
|
||||
Green: XY{X: 0.30, Y: 0.60},
|
||||
Blue: XY{X: 0.15, Y: 0.06},
|
||||
Red: XY{X: 0.64, Y: 0.33},
|
||||
White: XY{X: 0.3127, Y: 0.3290}, // D65
|
||||
},
|
||||
4: {
|
||||
Name: "BT-470M",
|
||||
Green: XY{X: 0.21, Y: 0.71},
|
||||
Blue: XY{X: 0.14, Y: 0.08},
|
||||
Red: XY{X: 0.67, Y: 0.33},
|
||||
White: XY{X: 0.310, Y: 0.316},
|
||||
},
|
||||
5: {
|
||||
Name: "BT-470B",
|
||||
Green: XY{X: 0.29, Y: 0.69},
|
||||
Blue: XY{X: 0.15, Y: 0.06},
|
||||
Red: XY{X: 0.64, Y: 0.33},
|
||||
White: XY{X: 0.310, Y: 0.316},
|
||||
},
|
||||
6: {
|
||||
Name: "BT-601",
|
||||
Green: XY{0.310, 0.595},
|
||||
Blue: XY{0.155, 0.070},
|
||||
Red: XY{0.630, 0.340},
|
||||
White: XY{0.3127, 0.3290},
|
||||
},
|
||||
7: {
|
||||
Name: "BT-601",
|
||||
Green: XY{0.310, 0.595},
|
||||
Blue: XY{0.155, 0.070},
|
||||
Red: XY{0.630, 0.340},
|
||||
White: XY{0.3127, 0.3290},
|
||||
},
|
||||
8: {
|
||||
Name: "Generic film",
|
||||
Green: XY{0.243, 0.692},
|
||||
Blue: XY{0.145, 0.049},
|
||||
Red: XY{0.681, 0.319},
|
||||
White: XY{0.310, 0.316},
|
||||
},
|
||||
9: {
|
||||
Name: "BT-2020",
|
||||
Green: XY{0.170, 0.797},
|
||||
Blue: XY{0.131, 0.046},
|
||||
Red: XY{0.708, 0.292},
|
||||
White: XY{0.3127, 0.3290},
|
||||
},
|
||||
10: { // 10
|
||||
Name: "SMPTE ST 428-1",
|
||||
Green: XY{0.0, 1.0},
|
||||
Blue: XY{0.0, 0.0},
|
||||
Red: XY{1.0, 0.0},
|
||||
White: XY{1 / 3., 1 / 3.},
|
||||
},
|
||||
11: { // 11
|
||||
Name: "DCI-P3",
|
||||
Green: XY{0.265, 0.690},
|
||||
Blue: XY{0.150, 0.060},
|
||||
Red: XY{0.680, 0.320},
|
||||
White: XY{0.314, 0.351}, // DCI White
|
||||
},
|
||||
12: { // 12
|
||||
Name:  "Display P3",
|
||||
Green: XY{0.265, 0.690},
|
||||
Blue: XY{0.150, 0.060},
|
||||
Red: XY{0.680, 0.320},
|
||||
White: XY{0.3127, 0.3290}, // D65
|
||||
},
|
||||
22: { // 22
|
||||
Name: "Unnamed",
|
||||
Green: XY{0.295, 0.605},
|
||||
Blue: XY{0.155, 0.077},
|
||||
Red: XY{0.630, 0.340},
|
||||
White: XY{0.3127, 0.3290}, // D65
|
||||
},
|
||||
}
|
||||
|
||||
// TransferFunction defines an Opto-Electronic Transfer Function (OETF)
|
||||
// and its inverse Electro-Optical Transfer Function (EOTF).
|
||||
type TransferFunction struct {
|
||||
ID int
|
||||
Name string
|
||||
OETF func(float64) float64 // To non-linear
|
||||
EOTF func(float64) float64 // To linear
|
||||
}
|
||||
|
||||
// Constants from various specifications used in the transfer functions.
|
||||
const (
|
||||
// BT.709, BT.2020, BT.601
|
||||
alpha709 = 1.099
|
||||
beta709 = 0.018
|
||||
gamma709 = 0.45
|
||||
delta709 = 4.5
|
||||
|
||||
// SMPTE ST 240M
|
||||
alpha240M = 1.1115
|
||||
beta240M = 0.0228
|
||||
gamma240M = 0.45
|
||||
delta240M = 4.0
|
||||
|
||||
// SMPTE ST 428-1
|
||||
gamma428 = 1.0 / 2.6
|
||||
|
||||
// PQ (Perceptual Quantizer) - SMPTE ST 2084
|
||||
m1PQ = 2610.0 / 16384.0 // (2610 / 4096) * (1/4)
|
||||
m2PQ = 2523.0 / 32.0 // (2523 / 4096) * 128
|
||||
c1PQ = 3424.0 / 4096.0
|
||||
c2PQ = 2413.0 / 4096.0 * 32.0
|
||||
c3PQ = 2392.0 / 4096.0 * 32.0
|
||||
|
||||
// HLG (Hybrid Log-Gamma) - ARIB STD-B67
|
||||
aHLG = 0.17883277
|
||||
bHLG = 1.0 - 4.0*aHLG // 0.28466892
|
||||
cHLG = 0.55991073 // 0.5 - aHLG*math.Log(4.0*aHLG)
|
||||
)
|
||||
|
||||
// holds all the H.273 transfer characteristics.
|
||||
var transfer_functions = make(map[int]TransferFunction)
|
||||
|
||||
func init() {
|
||||
tf1 := TransferFunction{
|
||||
ID: 1, Name: "BT.709",
|
||||
OETF: func(L float64) float64 {
|
||||
if L < beta709 {
|
||||
return delta709 * L
|
||||
}
|
||||
return alpha709*math.Pow(L, gamma709) - (alpha709 - 1)
|
||||
},
|
||||
EOTF: func(V float64) float64 {
|
||||
if V < delta709*beta709 {
|
||||
return V / delta709
|
||||
}
|
||||
return math.Pow((V+(alpha709-1))/alpha709, 1.0/gamma709)
|
||||
},
|
||||
}
|
||||
transfer_functions[1] = tf1
|
||||
transfer_functions[6] = tf1 // BT.601, BT.2020 share this with BT.709
|
||||
transfer_functions[14] = tf1 // BT.2020 10-bit
|
||||
transfer_functions[15] = tf1 // BT.2020 12-bit
|
||||
|
||||
// 2: Identity
|
||||
transfer_functions[2] = TransferFunction{
|
||||
ID: 2, Name: "Identity",
|
||||
OETF: func(v float64) float64 { return v },
|
||||
EOTF: func(v float64) float64 { return v },
|
||||
}
|
||||
transfer_functions[8] = transfer_functions[2]
|
||||
|
||||
// 4: Gamma 2.2
|
||||
tf4 := TransferFunction{
|
||||
ID: 4, Name: "Gamma 2.2",
|
||||
OETF: func(L float64) float64 { return math.Pow(L, 1.0/2.2) },
|
||||
EOTF: func(V float64) float64 { return math.Pow(V, 2.2) },
|
||||
}
|
||||
transfer_functions[4] = tf4
|
||||
|
||||
|
||||
// 5: Gamma 2.8
|
||||
transfer_functions[5] = TransferFunction{
|
||||
ID: 5, Name: "Gamma 2.8",
|
||||
OETF: func(L float64) float64 { return math.Pow(L, 1.0/2.8) },
|
||||
EOTF: func(V float64) float64 { return math.Pow(V, 2.8) },
|
||||
}
|
||||
|
||||
// 7: SMPTE 240M
|
||||
tf7 := TransferFunction{
|
||||
ID: 7, Name: "SMPTE 240M",
|
||||
OETF: func(L float64) float64 {
|
||||
if L < beta240M {
|
||||
return delta240M * L
|
||||
}
|
||||
return alpha240M*math.Pow(L, gamma240M) - (alpha240M - 1)
|
||||
},
|
||||
EOTF: func(V float64) float64 {
|
||||
if V < delta240M*beta240M {
|
||||
return V / delta240M
|
||||
}
|
||||
return math.Pow((V+(alpha240M-1))/alpha240M, 1.0/gamma240M)
|
||||
},
|
||||
}
|
||||
transfer_functions[7] = tf7
|
||||
// 9: Logarithmic (100:1)
|
||||
transfer_functions[9] = TransferFunction{
|
||||
ID: 9, Name: "Logarithmic (100:1)",
|
||||
OETF: func(L float64) float64 {
|
||||
return 1.0 - math.Log10(1.0-L*(1.0-math.Pow(10.0, -2.0)))/2.0
|
||||
},
|
||||
EOTF: func(V float64) float64 {
|
||||
return (1.0 - math.Pow(10.0, -2.0*V)) / (1.0 - math.Pow(10.0, -2.0))
|
||||
},
|
||||
}
|
||||
|
||||
// 10: Logarithmic (100 * sqrt(10):1)
|
||||
transfer_functions[10] = TransferFunction{
|
||||
ID: 10, Name: "Logarithmic (100*sqrt(10):1)",
|
||||
OETF: func(L float64) float64 {
|
||||
return 1.0 - math.Log10(1.0-L*(1.0-math.Pow(10.0, -2.5)))/2.5
|
||||
},
|
||||
EOTF: func(V float64) float64 {
|
||||
return (1.0 - math.Pow(10.0, -2.5*V)) / (1.0 - math.Pow(10.0, -2.5))
|
||||
},
|
||||
}
|
||||
|
||||
// 11: IEC 61966-2-4
|
||||
transfer_functions[11] = TransferFunction{
|
||||
ID: 11, Name: "IEC 61966-2-4",
|
||||
OETF: func(L float64) float64 {
|
||||
if L < -beta709 {
|
||||
return -delta709 * -L
|
||||
}
|
||||
if L > beta709 {
|
||||
return alpha709*math.Pow(L, gamma709) - (alpha709 - 1)
|
||||
}
|
||||
return delta709 * L
|
||||
},
|
||||
EOTF: func(V float64) float64 {
|
||||
if V < -delta709*beta709 {
|
||||
return -math.Pow((-V+(alpha709-1))/alpha709, 1.0/gamma709)
|
||||
}
|
||||
if V > delta709*beta709 {
|
||||
return math.Pow((V+(alpha709-1))/alpha709, 1.0/gamma709)
|
||||
}
|
||||
return V / delta709
|
||||
},
|
||||
}
|
||||
|
||||
// 12: BT.1361 extended gamut
|
||||
tf12 := tf1 // It's based on BT.709
|
||||
tf12.ID = 12
|
||||
tf12.Name = "BT.1361"
|
||||
transfer_functions[12] = tf12
|
||||
|
||||
// 13: sRGB/IEC 61966-2-1
|
||||
transfer_functions[13] = TransferFunction{
|
||||
ID: 13, Name: "sRGB",
|
||||
OETF: func(L float64) float64 {
|
||||
if L <= 0.0031308 {
|
||||
return 12.92 * L
|
||||
}
|
||||
return 1.055*math.Pow(L, 1.0/2.4) - 0.055
|
||||
},
|
||||
EOTF: func(V float64) float64 {
|
||||
if V <= 0.04045 {
|
||||
return V / 12.92
|
||||
}
|
||||
return math.Pow((V+0.055)/1.055, 2.4)
|
||||
},
|
||||
}
|
||||
// 16: SMPTE ST 2084 (PQ)
|
||||
transfer_functions[16] = TransferFunction{
|
||||
ID: 16, Name: "SMPTE ST 2084 (PQ)",
|
||||
OETF: func(L float64) float64 { // EOTF^-1, L is normalized to 10000 cd/m^2
|
||||
Lp := math.Pow(L, m1PQ)
|
||||
return math.Pow((c1PQ+c2PQ*Lp)/(1.0+c3PQ*Lp), m2PQ)
|
||||
},
|
||||
EOTF: func(V float64) float64 { // V is non-linear signal
|
||||
Vp := math.Pow(V, 1.0/m2PQ)
|
||||
num := math.Max(Vp-c1PQ, 0.0)
|
||||
den := math.Max(c2PQ-c3PQ*Vp, 1e-6) // Avoid division by zero
|
||||
return math.Pow(num/den, 1.0/m1PQ)
|
||||
},
|
||||
}
|
||||
|
||||
// 17: SMPTE ST 428-1
|
||||
transfer_functions[17] = TransferFunction{
|
||||
ID: 17, Name: "SMPTE ST 428-1",
|
||||
OETF: func(L float64) float64 { // OOTF^-1, from linear scene light to D-cinema
|
||||
// Input L is assumed to be scene linear (48 cd/m^2 peak)
|
||||
// The spec normalizes by 52.37
|
||||
return math.Pow((L*48.0)/52.37, gamma428)
|
||||
},
|
||||
EOTF: func(V float64) float64 { // OOTF
|
||||
// Output is linear light, normalized to 1.0 for peak white (48 cd/m^2)
|
||||
return (52.37 / 48.0) * math.Pow(V, 1.0/gamma428)
|
||||
},
|
||||
}
|
||||
|
||||
// 18: ARIB STD-B67 (HLG)
|
||||
transfer_functions[18] = TransferFunction{
|
||||
ID: 18, Name: "ARIB STD-B67 (HLG)",
|
||||
OETF: func(L float64) float64 { // L is scene linear light, display-referred
|
||||
if L <= 1.0/12.0 {
|
||||
return math.Sqrt(3.0 * L)
|
||||
}
|
||||
return aHLG*math.Log(12.0*L-bHLG) + cHLG
|
||||
},
|
||||
EOTF: func(V float64) float64 { // V is the non-linear signal
|
||||
if V <= 0.5 {
|
||||
return (V * V) / 3.0
|
||||
}
|
||||
return (math.Exp((V-cHLG)/aHLG) + bHLG) / 12.0
|
||||
},
|
||||
}
|
||||
}
|
||||
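A short sketch of the CICP helpers: the sRGB code points (1, 13, 0, 1) need no conversion, so PipelineToSRGB returns nil, while the Display P3 code points produce a non-nil pipeline built from the primaries and transfer functions defined above. The import path is assumed from the vendor layout.
package main

import (
	"fmt"

	"github.com/kovidgoyal/imaging/prism/meta"
)

func main() {
	fmt.Println(meta.SRGB.IsSRGB())                       // true
	fmt.Println(meta.SRGB.PipelineToSRGB() == nil)        // true: already sRGB, nothing to do
	fmt.Println(meta.DISPLAY_P3.PipelineToSRGB() != nil)  // true: Display P3 needs conversion
}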
119
vendor/github.com/kovidgoyal/imaging/prism/meta/data.go
generated
vendored
@@ -3,21 +3,104 @@ package meta
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta/icc"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
"github.com/rwcarlsen/goexif/exif"
|
||||
)
|
||||
|
||||
var _ = fmt.Println
|
||||
|
||||
// Data represents the metadata for an image.
|
||||
type Data struct {
|
||||
Format ImageFormat
|
||||
PixelWidth uint32
|
||||
PixelHeight uint32
|
||||
BitsPerComponent uint32
|
||||
ExifData []byte
|
||||
iccProfileData []byte
|
||||
iccProfileErr error
|
||||
Format types.Format
|
||||
PixelWidth uint32
|
||||
PixelHeight uint32
|
||||
BitsPerComponent uint32
|
||||
HasFrames bool
|
||||
NumFrames, NumPlays int
|
||||
CICP CodingIndependentCodePoints
|
||||
|
||||
mutex sync.Mutex
|
||||
exifData []byte
|
||||
exif *exif.Exif
|
||||
exifErr error
|
||||
iccProfileData []byte
|
||||
iccProfileErr error
|
||||
iccProfile *icc.Profile
|
||||
}
|
||||
|
||||
func (s *Data) Clone() *Data {
|
||||
return &Data{
|
||||
Format: s.Format, PixelWidth: s.PixelWidth, PixelHeight: s.PixelHeight, BitsPerComponent: s.BitsPerComponent,
|
||||
HasFrames: s.HasFrames, NumFrames: s.NumFrames, NumPlays: s.NumPlays, CICP: s.CICP,
|
||||
exifData: slices.Clone(s.exifData), exifErr: s.exifErr, iccProfileData: slices.Clone(s.iccProfileData),
|
||||
iccProfileErr: s.iccProfileErr,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Data) IsSRGB() bool {
|
||||
if s.CICP.IsSet {
|
||||
return s.CICP.IsSRGB()
|
||||
}
|
||||
if p, err := s.ICCProfile(); p != nil && err == nil {
|
||||
return p.IsSRGB()
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns an extracted EXIF metadata object from this metadata.
|
||||
//
|
||||
// An error is returned if the EXIF profile could not be correctly parsed.
|
||||
//
|
||||
// If no EXIF data was found, nil is returned without an error.
|
||||
func (md *Data) Exif() (*exif.Exif, error) {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
|
||||
if md.exifErr != nil {
|
||||
return nil, md.exifErr
|
||||
}
|
||||
if md.exif != nil {
|
||||
return md.exif, nil
|
||||
}
|
||||
if len(md.exifData) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
md.exif, md.exifErr = exif.Decode(bytes.NewReader(md.exifData))
|
||||
return md.exif, md.exifErr
|
||||
}
|
||||
|
||||
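// Illustrative usage sketch, not part of the vendored diff: the accessor above
// decodes lazily and caches the result under the mutex, so repeated calls are
// cheap:
//
//	if x, err := md.Exif(); err == nil && x != nil {
//		// use the decoded *exif.Exif; a nil x with a nil err means no EXIF data
//	}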
func (md *Data) SetExifData(data []byte) {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
md.exifData = data
|
||||
md.exifErr = nil
|
||||
md.exif = nil
|
||||
}
|
||||
|
||||
func (md *Data) SetExif(e *exif.Exif) {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
md.exifData = nil
|
||||
md.exifErr = nil
|
||||
md.exif = e
|
||||
}
|
||||
|
||||
func (md *Data) SetExifError(e error) {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
md.exifData = nil
|
||||
md.exifErr = e
|
||||
md.exif = nil
|
||||
}
|
||||
|
||||
func (md *Data) ExifData() []byte {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
return md.exifData
|
||||
}
|
||||
|
||||
// ICCProfile returns an extracted ICC profile from this metadata.
|
||||
@@ -26,29 +109,43 @@ type Data struct {
|
||||
//
|
||||
// If no profile data was found, nil is returned without an error.
|
||||
func (md *Data) ICCProfile() (*icc.Profile, error) {
|
||||
if md.iccProfileData == nil {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
|
||||
if md.iccProfileErr != nil {
|
||||
return nil, md.iccProfileErr
|
||||
}
|
||||
|
||||
return icc.NewProfileReader(bytes.NewReader(md.iccProfileData)).ReadProfile()
|
||||
if len(md.iccProfileData) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
md.iccProfile, md.iccProfileErr = icc.NewProfileReader(bytes.NewReader(md.iccProfileData)).ReadProfile()
|
||||
return md.iccProfile, md.iccProfileErr
|
||||
}
|
||||
|
||||
// ICCProfile returns the raw ICC profile data from this metadata.
|
||||
// ICCProfileData returns the raw ICC profile data from this metadata.
|
||||
//
|
||||
// An error is returned if the ICC profile could not be correctly extracted from
|
||||
// the image.
|
||||
//
|
||||
// If no profile data was found, nil is returned without an error.
|
||||
func (md *Data) ICCProfileData() ([]byte, error) {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
return md.iccProfileData, md.iccProfileErr
|
||||
}
|
||||
|
||||
func (md *Data) SetICCProfileData(data []byte) {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
md.iccProfileData = data
|
||||
md.iccProfileErr = nil
|
||||
md.iccProfile = nil
|
||||
}
|
||||
|
||||
func (md *Data) SetICCProfileError(err error) {
|
||||
md.mutex.Lock()
|
||||
defer md.mutex.Unlock()
|
||||
md.iccProfileData = nil
|
||||
md.iccProfile = nil
|
||||
md.iccProfileErr = err
|
||||
}
|
||||
|
||||
50
vendor/github.com/kovidgoyal/imaging/prism/meta/gifmeta/gifmeta.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
package gifmeta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"image/gif"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
c, err := gif.DecodeConfig(r)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "gif: can't recognize format") {
|
||||
err = nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
md = &meta.Data{
|
||||
Format: types.GIF, PixelWidth: uint32(c.Width), PixelHeight: uint32(c.Height),
|
||||
BitsPerComponent: 8, HasFrames: true,
|
||||
}
|
||||
return md, nil
|
||||
}
|
||||
|
||||
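// Illustrative usage sketch, not part of the vendored diff (the file name is
// made up):
//
//	f, _ := os.Open("animation.gif")
//	md, err := ExtractMetadata(f)
//	// md == nil with err == nil means the stream was not recognized as a GIF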
func CalcMinimumGap(gaps []int) (min_gap int) {
|
||||
// Some broken GIF images have all zero gaps, browsers with their usual
|
||||
// idiot ideas render these with a default 100ms gap https://bugzilla.mozilla.org/show_bug.cgi?id=125137
|
||||
// Browsers actually force a 100ms gap at any zero gap frame, but that
|
||||
// just means it is impossible to deliberately use zero gap frames for
|
||||
// sophisticated blending, so we don't do that.
|
||||
max_gap := 0
|
||||
for _, g := range gaps {
|
||||
max_gap = max(max_gap, g)
|
||||
}
|
||||
if max_gap <= 0 {
|
||||
min_gap = 10
|
||||
}
|
||||
return min_gap
|
||||
}
|
||||
|
||||
func CalculateFrameDelay(delay, min_gap int) time.Duration {
|
||||
delay_ms := max(min_gap, delay)
|
||||
return time.Duration(delay_ms) * 10 * time.Millisecond
|
||||
}
|
||||
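// Illustrative example, not part of the vendored diff: for a broken GIF whose
// frame gaps are all zero, CalcMinimumGap returns 10 (hundredths of a second),
// so CalculateFrameDelay yields the browser-compatible 100ms:
//
//	gap := CalcMinimumGap([]int{0, 0, 0}) // 10
//	d := CalculateFrameDelay(0, gap)      // 100 * time.Millisecond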
104
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/blackpoint.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func (p *Profile) IsMatrixShaper() bool {
|
||||
h := p.TagTable.Has
|
||||
switch p.Header.DataColorSpace {
|
||||
case ColorSpaceGray:
|
||||
return h(GrayTRCTagSignature)
|
||||
case ColorSpaceRGB:
|
||||
return h(RedColorantTagSignature) && h(RedTRCTagSignature) && h(GreenColorantTagSignature) && h(GreenTRCTagSignature) && h(BlueColorantTagSignature) && h(BlueTRCTagSignature)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Profile) BlackPoint(intent RenderingIntent, debug General_debug_callback) (ans XYZType) {
|
||||
if q := p.blackpoints[intent]; q != nil {
|
||||
return *q
|
||||
}
|
||||
defer func() {
|
||||
p.blackpoints[intent] = &ans
|
||||
}()
|
||||
if p.Header.DeviceClass == DeviceClassLink || p.Header.DeviceClass == DeviceClassAbstract || p.Header.DeviceClass == DeviceClassNamedColor {
|
||||
return
|
||||
}
|
||||
if !(intent == PerceptualRenderingIntent || intent == SaturationRenderingIntent || intent == RelativeColorimetricRenderingIntent) {
|
||||
return
|
||||
}
|
||||
if p.Header.Version.Major >= 4 && (intent == PerceptualRenderingIntent || intent == SaturationRenderingIntent) {
|
||||
if p.IsMatrixShaper() {
|
||||
return p.black_point_as_darker_colorant(RelativeColorimetricRenderingIntent, debug)
|
||||
}
|
||||
return XYZType{0.00336, 0.0034731, 0.00287}
|
||||
}
|
||||
if intent == RelativeColorimetricRenderingIntent && p.Header.DeviceClass == DeviceClassOutput && p.Header.DataColorSpace == ColorSpaceCMYK {
|
||||
return p.black_point_using_perceptual_black(debug)
|
||||
}
|
||||
return p.black_point_as_darker_colorant(intent, debug)
|
||||
}
|
||||
|
||||
func (p *Profile) black_point_as_darker_colorant(intent RenderingIntent, debug General_debug_callback) XYZType {
|
||||
bp := p.Header.DataColorSpace.BlackPoint()
|
||||
if bp == nil || (len(bp) != 3 && len(bp) != 4) {
|
||||
return XYZType{}
|
||||
}
|
||||
tr, err := p.CreateTransformerToPCS(intent, len(bp), debug == nil)
|
||||
if err != nil {
|
||||
return XYZType{}
|
||||
}
|
||||
if p.Header.ProfileConnectionSpace == ColorSpaceXYZ {
|
||||
tr.Append(NewXYZtoLAB(p.PCSIlluminant))
|
||||
}
|
||||
var l, a, b unit_float
|
||||
if debug == nil {
|
||||
if len(bp) == 3 {
|
||||
l, a, b = tr.Transform(bp[0], bp[1], bp[2])
|
||||
} else {
|
||||
var x [4]unit_float
|
||||
tr.TransformGeneral(x[:], bp)
|
||||
l, a, b = x[0], x[1], x[2]
|
||||
}
|
||||
} else {
|
||||
var x [4]unit_float
|
||||
tr.TransformGeneralDebug(x[:], bp, debug)
|
||||
l, a, b = x[0], x[1], x[2]
|
||||
}
|
||||
a, b = 0, 0
|
||||
if l < 0 || l > 50 {
|
||||
l = 0
|
||||
}
|
||||
x, y, z := NewLABtoXYZ(p.PCSIlluminant).Transform(l, a, b)
|
||||
return XYZType{x, y, z}
|
||||
}
|
||||
|
||||
func (p *Profile) black_point_using_perceptual_black(debug General_debug_callback) XYZType {
|
||||
dev, err := p.CreateTransformerToDevice(PerceptualRenderingIntent, false, debug == nil)
|
||||
if err != nil {
|
||||
return XYZType{}
|
||||
}
|
||||
tr, err := p.CreateTransformerToPCS(RelativeColorimetricRenderingIntent, 4, debug == nil)
|
||||
if err != nil {
|
||||
return XYZType{}
|
||||
}
|
||||
dev = dev.Weld(tr, debug == nil)
|
||||
if !dev.IsSuitableFor(3, 3) {
|
||||
return XYZType{}
|
||||
}
|
||||
lab := [4]unit_float{}
|
||||
if debug == nil {
|
||||
dev.TransformGeneral(lab[:], []unit_float{0, 0, 0, 0})
|
||||
} else {
|
||||
dev.TransformGeneralDebug(lab[:], []unit_float{0, 0, 0, 0}, debug)
|
||||
}
|
||||
l, a, b := lab[0], lab[1], lab[2]
|
||||
l = min(l, 50)
|
||||
a, b = 0, 0
|
||||
x, y, z := NewLABtoXYZ(p.PCSIlluminant).Transform(l, a, b)
|
||||
return XYZType{x, y, z}
|
||||
}
|
||||
14
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/colorspace.go
generated
vendored
@@ -32,6 +32,20 @@ const (
|
||||
ColorSpace15Color ColorSpace = 0x46434C52 // 'FCLR'
|
||||
)
|
||||
|
||||
func (cs ColorSpace) BlackPoint() []unit_float {
|
||||
switch cs {
|
||||
case ColorSpaceLab, ColorSpaceRGB, ColorSpaceXYZ:
|
||||
return []unit_float{0, 0, 0}
|
||||
case ColorSpaceGray:
|
||||
return []unit_float{0}
|
||||
case ColorSpaceCMYK:
|
||||
return []unit_float{1, 1, 1, 1}
|
||||
case ColorSpaceCMY:
|
||||
return []unit_float{1, 1, 1}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cs ColorSpace) String() string {
|
||||
switch cs {
|
||||
case ColorSpaceXYZ:
|
||||
|
||||
19
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/header.go
generated
vendored
@@ -2,9 +2,18 @@ package icc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
type unit_float = float64
|
||||
|
||||
// We consider two floats equal if they result in the same uint16 representation
|
||||
const FLOAT_EQUALITY_THRESHOLD = 1. / math.MaxUint16
|
||||
|
||||
func pow(a, b unit_float) unit_float { return unit_float(math.Pow(float64(a), float64(b))) }
|
||||
func abs(a unit_float) unit_float { return unit_float(math.Abs(float64(a))) }
|
||||
|
||||
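// Illustrative sketch, not part of the vendored diff: how the threshold above
// is meant to be applied when comparing values that will be quantized to
// uint16. floats_equal is a hypothetical helper, not package API.
func floats_equal(a, b unit_float) bool {
	return abs(a-b) <= FLOAT_EQUALITY_THRESHOLD
}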
type Header struct {
|
||||
ProfileSize uint32
|
||||
PreferredCMM Signature
|
||||
@@ -20,7 +29,7 @@ type Header struct {
|
||||
DeviceModel Signature
|
||||
DeviceAttributes uint64
|
||||
RenderingIntent RenderingIntent
|
||||
PCSIlluminant [3]uint32
|
||||
PCSIlluminant [12]uint8
|
||||
ProfileCreator Signature
|
||||
ProfileID [16]byte
|
||||
Reserved [28]byte
|
||||
@@ -39,6 +48,10 @@ func (h Header) DependsOnEmbeddedData() bool {
|
||||
return (h.Flags>>30)&1 != 0
|
||||
}
|
||||
|
||||
func (h Header) String() string {
|
||||
return fmt.Sprintf("Header{PreferredCMM: %s, Version: %s, DeviceManufacturer: %s, DeviceModel: %s, ProfileCreator: %s, RenderingIntent: %s, CreatedAt: %v}", h.PreferredCMM, h.Version, h.DeviceManufacturer, h.DeviceModel, h.ProfileCreator, h.RenderingIntent, h.CreatedAt())
|
||||
func (h Header) ParsedPCSIlluminant() XYZType {
|
||||
return xyz_type(h.PCSIlluminant[:])
|
||||
}
|
||||
|
||||
func (h Header) String() string {
|
||||
return fmt.Sprintf("Header{PreferredCMM: %s, Version: %s, DeviceManufacturer: %s, DeviceModel: %s, ProfileCreator: %s, RenderingIntent: %s, CreatedAt: %v PCSIlluminant: %v}", h.PreferredCMM, h.Version, h.DeviceManufacturer, h.DeviceModel, h.ProfileCreator, h.RenderingIntent, h.CreatedAt(), h.ParsedPCSIlluminant())
|
||||
}
|
||||
|
||||
193
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/interpolate.go
generated
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type interpolation_data struct {
|
||||
num_inputs, num_outputs int
|
||||
samples []unit_float
|
||||
grid_points []int
|
||||
max_grid_points []int
|
||||
tetrahedral_index_lookup []int
|
||||
}
|
||||
|
||||
func make_interpolation_data(num_inputs, num_outputs int, grid_points []int, samples []unit_float) *interpolation_data {
|
||||
var tetrahedral_index_lookup [4]int
|
||||
max_grid_points := make([]int, len(grid_points))
|
||||
for i, g := range grid_points {
|
||||
max_grid_points[i] = g - 1
|
||||
}
|
||||
if num_inputs >= 3 {
|
||||
tetrahedral_index_lookup[0] = num_outputs
|
||||
for i := 1; i < num_inputs; i++ {
|
||||
tetrahedral_index_lookup[i] = tetrahedral_index_lookup[i-1] * grid_points[num_inputs-1]
|
||||
}
|
||||
}
|
||||
return &interpolation_data{
|
||||
num_inputs: num_inputs, num_outputs: num_outputs, grid_points: grid_points, max_grid_points: max_grid_points,
|
||||
tetrahedral_index_lookup: tetrahedral_index_lookup[:], samples: samples,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *interpolation_data) tetrahedral_interpolation(r, g, b unit_float, output []unit_float) {
|
||||
r, g, b = clamp01(r), clamp01(g), clamp01(b)
|
||||
px := r * unit_float(c.max_grid_points[0])
|
||||
py := g * unit_float(c.max_grid_points[1])
|
||||
pz := b * unit_float(c.max_grid_points[2])
|
||||
x0, y0, z0 := int(px), int(py), int(pz)
|
||||
rx, ry, rz := px-unit_float(x0), py-unit_float(y0), pz-unit_float(z0)
|
||||
|
||||
X0 := c.tetrahedral_index_lookup[2] * x0
|
||||
X1 := X0
|
||||
if r < 1 {
|
||||
X1 += c.tetrahedral_index_lookup[2]
|
||||
}
|
||||
Y0 := c.tetrahedral_index_lookup[1] * y0
|
||||
Y1 := Y0
|
||||
if g < 1 {
|
||||
Y1 += c.tetrahedral_index_lookup[1]
|
||||
}
|
||||
Z0 := c.tetrahedral_index_lookup[0] * z0
|
||||
Z1 := Z0
|
||||
if b < 1 {
|
||||
Z1 += c.tetrahedral_index_lookup[0]
|
||||
}
|
||||
type w struct{ a, b int }
|
||||
var c1, c2, c3 w
|
||||
c0 := X0 + Y0 + Z0
|
||||
// The six tetrahedra
|
||||
switch {
|
||||
case rx >= ry && ry >= rz:
|
||||
c1 = w{X1 + Y0 + Z0, c0}
|
||||
c2 = w{X1 + Y1 + Z0, X1 + Y0 + Z0}
|
||||
c3 = w{X1 + Y1 + Z1, X1 + Y1 + Z0}
|
||||
case rx >= rz && rz >= ry:
|
||||
c1 = w{X1 + Y0 + Z0, c0}
|
||||
c2 = w{X1 + Y1 + Z1, X1 + Y0 + Z1}
|
||||
c3 = w{X1 + Y0 + Z1, X1 + Y0 + Z0}
|
||||
case rz >= rx && rx >= ry:
|
||||
c1 = w{X1 + Y0 + Z1, X0 + Y0 + Z1}
|
||||
c2 = w{X1 + Y1 + Z1, X1 + Y0 + Z1}
|
||||
c3 = w{X0 + Y0 + Z1, c0}
|
||||
case ry >= rx && rx >= rz:
|
||||
c1 = w{X1 + Y1 + Z0, X0 + Y1 + Z0}
|
||||
c2 = w{X0 + Y1 + Z0, c0}
|
||||
c3 = w{X1 + Y1 + Z1, X1 + Y1 + Z0}
|
||||
case ry >= rz && rz >= rx:
|
||||
c1 = w{X1 + Y1 + Z1, X0 + Y1 + Z1}
|
||||
c2 = w{X0 + Y1 + Z0, c0}
|
||||
c3 = w{X0 + Y1 + Z1, X0 + Y1 + Z0}
|
||||
case rz >= ry && ry >= rx:
|
||||
c1 = w{X1 + Y1 + Z1, X0 + Y1 + Z1}
|
||||
c2 = w{X0 + Y1 + Z1, X0 + Y0 + Z1}
|
||||
c3 = w{X0 + Y0 + Z1, c0}
|
||||
}
|
||||
for o := range c.num_outputs {
|
||||
s := c.samples[o:]
|
||||
output[o] = s[c0] + (s[c1.a]-s[c1.b])*rx + (s[c2.a]-s[c2.b])*ry + (s[c3.a]-s[c3.b])*rz
|
||||
}
|
||||
}
|
||||
|
||||
// For more than 3 inputs (i.e., CMYK)

|
||||
// evaluate two 3-dimensional interpolations and then linearly interpolate between them.
|
||||
func (d *interpolation_data) tetrahedral_interpolation4(c, m, y, k unit_float, output []unit_float) {
|
||||
var tmp1, tmp2 [4]float64
|
||||
pk := clamp01(c) * unit_float(d.max_grid_points[0])
|
||||
k0 := int(math.Trunc(pk))
|
||||
rest := pk - unit_float(k0)
|
||||
|
||||
K0 := d.tetrahedral_index_lookup[3] * k0
|
||||
K1 := K0 + IfElse(c >= 1, 0, d.tetrahedral_index_lookup[3])
|
||||
|
||||
half := *d
|
||||
half.grid_points = half.grid_points[1:]
|
||||
half.max_grid_points = half.max_grid_points[1:]
|
||||
|
||||
half.samples = d.samples[K0:]
|
||||
half.tetrahedral_interpolation(m, y, k, tmp1[:len(output)])
|
||||
|
||||
half.samples = d.samples[K1:]
|
||||
half.tetrahedral_interpolation(m, y, k, tmp2[:len(output)])
|
||||
|
||||
for i := range output {
|
||||
y0, y1 := tmp1[i], tmp2[i]
|
||||
output[i] = y0 + (y1-y0)*rest
|
||||
}
|
||||
}
|
||||
|
||||
func sampled_value(samples []unit_float, max_idx unit_float, x unit_float) unit_float {
|
||||
idx := clamp01(x) * max_idx
|
||||
lof := unit_float(math.Trunc(float64(idx)))
|
||||
lo := int(lof)
|
||||
if lof == idx {
|
||||
return samples[lo]
|
||||
}
|
||||
p := idx - unit_float(lo)
|
||||
vhi := unit_float(samples[lo+1])
|
||||
vlo := unit_float(samples[lo])
|
||||
return vlo + p*(vhi-vlo)
|
||||
}
|
||||
|
||||
// Performs an n-linear interpolation on the CLUT values for the given input color using an iterative method.
|
||||
// Input values should be normalized between 0.0 and 1.0. Output MUST be zero initialized.
|
||||
func (c *interpolation_data) trilinear_interpolate(input, output []unit_float) {
|
||||
// Pre-allocate slices for indices and weights
|
||||
var buf [4]int
|
||||
var wbuf [4]unit_float
|
||||
indices := buf[:c.num_inputs]
|
||||
weights := wbuf[:c.num_inputs]
|
||||
input = input[:c.num_inputs]
|
||||
output = output[:c.num_outputs]
|
||||
|
||||
// Calculate the base indices and interpolation weights for each dimension.
|
||||
for i, val := range input {
|
||||
val = clamp01(val)
|
||||
// Scale the value to the grid dimensions
|
||||
pos := val * unit_float(c.max_grid_points[i])
|
||||
// The base index is the floor of the position.
|
||||
idx := int(pos)
|
||||
// The weight is the fractional part of the position.
|
||||
weight := pos - unit_float(idx)
|
||||
// Clamp index to be at most the second to last grid point.
|
||||
if idx >= c.max_grid_points[i] {
|
||||
idx = c.max_grid_points[i] - 1
|
||||
weight = 1 // set weight to 1 for border index
|
||||
}
|
||||
indices[i] = idx
|
||||
weights[i] = weight
|
||||
}
|
||||
// Iterate through all 2^InputChannels corners of the n-dimensional hypercube
|
||||
for i := range 1 << c.num_inputs {
|
||||
// Calculate the combined weight for this corner
|
||||
cornerWeight := unit_float(1)
|
||||
// Calculate the N-dimensional index to look up in the table
|
||||
tableIndex := 0
|
||||
multiplier := unit_float(1)
|
||||
|
||||
// As per section 10.12.3 of ICC.1-2022-5.pdf spec the first input channel
|
||||
// varies least rapidly and the last varies most rapidly
|
||||
for j := c.num_inputs - 1; j >= 0; j-- {
|
||||
// Check the j-th bit of i to decide if we are at the lower or upper bound for this dimension
|
||||
if (i>>j)&1 == 1 {
|
||||
// Upper bound for this dimension
|
||||
cornerWeight *= weights[j]
|
||||
tableIndex += int(unit_float(indices[j]+1) * multiplier)
|
||||
} else {
|
||||
// Lower bound for this dimension
|
||||
cornerWeight *= (1.0 - weights[j])
|
||||
tableIndex += int(unit_float(indices[j]) * multiplier)
|
||||
}
|
||||
multiplier *= unit_float(c.grid_points[j])
|
||||
}
|
||||
// Get the color value from the table for the current corner
|
||||
offset := tableIndex * c.num_outputs
|
||||
// Add the weighted corner color to the output
|
||||
for k, v := range c.samples[offset : offset+c.num_outputs] {
|
||||
output[k] += v * cornerWeight
|
||||
}
|
||||
}
|
||||
}
|
||||
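// Illustrative note, not part of the vendored diff: for a 2-input CLUT the
// corner loop above visits 2^2 = 4 corners with weights
// (1-wx)(1-wy), (1-wx)wy, wx(1-wy) and wx*wy for fractional position (wx, wy);
// the weights sum to 1, so the output is a convex combination of the corner
// samples, and the same holds for any number of inputs.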
252
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/pcs.go
generated
vendored
Normal file
@@ -0,0 +1,252 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kovidgoyal/imaging/colorconv"
|
||||
)
|
||||
|
||||
var _ = fmt.Println
|
||||
|
||||
const MAX_ENCODEABLE_XYZ = 1.0 + 32767.0/32768.0
|
||||
const MAX_ENCODEABLE_XYZ_INVERSE = 1.0 / (MAX_ENCODEABLE_XYZ)
|
||||
const LAB_MFT2_ENCODING_CORRECTION = 65535.0 / 65280.0
|
||||
const LAB_MFT2_ENCODING_CORRECTION_INVERSE = 65280.0 / 65535.0
|
||||
|
||||
func tg33(t func(r, g, b unit_float) (x, y, z unit_float), o, i []unit_float) {
|
||||
o[0], o[1], o[2] = t(i[0], i[1], i[2])
|
||||
}
|
||||
|
||||
type Scaling struct {
|
||||
name string
|
||||
s unit_float
|
||||
}
|
||||
|
||||
func (n *Scaling) String() string { return fmt.Sprintf("%s{%.6v}", n.name, n.s) }
|
||||
func (n *Scaling) IOSig() (int, int) { return 3, 3 }
|
||||
func (n *Scaling) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
func (m *Scaling) Transform(x, y, z unit_float) (unit_float, unit_float, unit_float) {
|
||||
return x * m.s, y * m.s, z * m.s
|
||||
}
|
||||
func (m *Scaling) AsMatrix3() *Matrix3 { return NewScalingMatrix3(m.s) }
|
||||
|
||||
func (m *Scaling) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
func NewScaling(name string, s unit_float) *Scaling { return &Scaling{name, s} }
|
||||
|
||||
type Scaling4 struct {
|
||||
name string
|
||||
s unit_float
|
||||
}
|
||||
|
||||
func (n *Scaling4) String() string { return fmt.Sprintf("%s{%.6v}", n.name, n.s) }
|
||||
func (n *Scaling4) IOSig() (int, int) { return 4, 4 }
|
||||
func (n *Scaling4) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
func (m *Scaling4) Transform(x, y, z unit_float) (unit_float, unit_float, unit_float) {
|
||||
return x * m.s, y * m.s, z * m.s
|
||||
}
|
||||
func (m *Scaling4) TransformGeneral(o, i []unit_float) {
|
||||
for x := range 4 {
|
||||
o[x] = m.s * i[x]
|
||||
}
|
||||
}
|
||||
|
||||
// A transformer to convert normalized [0,1] values to the [0,1.99997]
|
||||
// (u1Fixed15Number) values used by ICC XYZ PCS space
|
||||
func NewNormalizedToXYZ() *Scaling { return &Scaling{"NormalizedToXYZ", MAX_ENCODEABLE_XYZ} }
|
||||
func NewXYZToNormalized() *Scaling { return &Scaling{"XYZToNormalized", MAX_ENCODEABLE_XYZ_INVERSE} }
|
||||
|
||||
// A transformer that converts from the legacy LAB encoding used in the obsolete lut16type (mft2) tags
|
||||
func NewLABFromMFT2() *Scaling { return &Scaling{"LABFromMFT2", LAB_MFT2_ENCODING_CORRECTION} }
|
||||
func NewLABToMFT2() *Scaling { return &Scaling{"LABToMFT2", LAB_MFT2_ENCODING_CORRECTION_INVERSE} }
|
||||
|
||||
// A transformer to convert normalized [0,1] to the LAB co-ordinate system
|
||||
// used by ICC PCS LAB profiles [0-100], [-128, 127]
|
||||
type NormalizedToLAB int
|
||||
|
||||
func (n NormalizedToLAB) String() string { return "NormalizedToLAB" }
|
||||
func (n NormalizedToLAB) IOSig() (int, int) { return 3, 3 }
|
||||
func (n *NormalizedToLAB) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
func (m *NormalizedToLAB) Transform(x, y, z unit_float) (unit_float, unit_float, unit_float) {
|
||||
// See PackLabDoubleFromFloat in lcms source code
|
||||
return x * 100, (y*255 - 128), (z*255 - 128)
|
||||
}
|
||||
|
||||
func (m *NormalizedToLAB) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
func NewNormalizedToLAB() *NormalizedToLAB {
|
||||
x := NormalizedToLAB(0)
|
||||
return &x
|
||||
}
|
||||
|
||||
type LABToNormalized int
|
||||
|
||||
func (n LABToNormalized) String() string { return "LABToNormalized" }
|
||||
func (n LABToNormalized) IOSig() (int, int) { return 3, 3 }
|
||||
func (n *LABToNormalized) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
func (m *LABToNormalized) Transform(x, y, z unit_float) (unit_float, unit_float, unit_float) {
|
||||
// See PackLabDoubleFromFloat in lcms source code
|
||||
return x * (1. / 100), (y*(1./255) + 128./255), (z*(1./255) + 128./255)
|
||||
}
|
||||
|
||||
func (m *LABToNormalized) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
func NewLABToNormalized() *LABToNormalized {
|
||||
x := LABToNormalized(0)
|
||||
return &x
|
||||
}
|
||||
|
||||
type BlackPointCorrection struct {
|
||||
scale, offset XYZType
|
||||
}
|
||||
|
||||
func (n BlackPointCorrection) IOSig() (int, int) { return 3, 3 }
|
||||
func (n *BlackPointCorrection) Iter(f func(ChannelTransformer) bool) {
|
||||
m := &Matrix3{{n.scale.X, 0, 0}, {0, n.scale.Y, 0}, {0, 0, n.scale.Z}}
|
||||
if !is_identity_matrix(m) {
|
||||
if !f(m) {
|
||||
return
|
||||
}
|
||||
}
|
||||
t := &Translation{n.offset.X, n.offset.Y, n.offset.Z}
|
||||
if !t.Empty() {
|
||||
f(t)
|
||||
}
|
||||
}
|
||||
|
||||
func NewBlackPointCorrection(in_whitepoint, in_blackpoint, out_blackpoint XYZType) *BlackPointCorrection {
|
||||
tx := in_blackpoint.X - in_whitepoint.X
|
||||
ty := in_blackpoint.Y - in_whitepoint.Y
|
||||
tz := in_blackpoint.Z - in_whitepoint.Z
|
||||
ans := BlackPointCorrection{}
|
||||
|
||||
ans.scale.X = (out_blackpoint.X - in_whitepoint.X) / tx
|
||||
ans.scale.Y = (out_blackpoint.Y - in_whitepoint.Y) / ty
|
||||
ans.scale.Z = (out_blackpoint.Z - in_whitepoint.Z) / tz
|
||||
|
||||
ans.offset.X = -in_whitepoint.X * (out_blackpoint.X - in_blackpoint.X) / tx
|
||||
ans.offset.Y = -in_whitepoint.Y * (out_blackpoint.Y - in_blackpoint.Y) / ty
|
||||
ans.offset.Z = -in_whitepoint.Z * (out_blackpoint.Z - in_blackpoint.Z) / tz
|
||||
ans.offset.X *= MAX_ENCODEABLE_XYZ_INVERSE
|
||||
ans.offset.Y *= MAX_ENCODEABLE_XYZ_INVERSE
|
||||
ans.offset.Z *= MAX_ENCODEABLE_XYZ_INVERSE
|
||||
|
||||
return &ans
|
||||
}
|
||||
|
||||
func (c *BlackPointCorrection) String() string {
|
||||
return fmt.Sprintf("BlackPointCorrection{scale: %v offset: %v}", c.scale, c.offset)
|
||||
}
|
||||
|
||||
func (c *BlackPointCorrection) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.scale.X*r + c.offset.X, c.scale.Y*g + c.offset.Y, c.scale.Z*b + c.offset.Z
|
||||
}
|
||||
func (m *BlackPointCorrection) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
type LABtosRGB struct {
|
||||
c *colorconv.ConvertColor
|
||||
t func(l, a, b unit_float) (x, y, z unit_float)
|
||||
}
|
||||
|
||||
func NewLABtosRGB(whitepoint XYZType, clamp, map_gamut bool) *LABtosRGB {
|
||||
c := colorconv.NewConvertColor(whitepoint.X, whitepoint.Y, whitepoint.Z, 1)
|
||||
if clamp {
|
||||
if map_gamut {
|
||||
return &LABtosRGB{c, c.LabToSRGB}
|
||||
}
|
||||
return &LABtosRGB{c, c.LabToSRGBClamp}
|
||||
}
|
||||
return &LABtosRGB{c, c.LabToSRGBNoGamutMap}
|
||||
}
|
||||
|
||||
func (c LABtosRGB) Transform(l, a, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.t(l, a, b)
|
||||
}
|
||||
func (m LABtosRGB) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
func (n LABtosRGB) IOSig() (int, int) { return 3, 3 }
|
||||
func (n LABtosRGB) String() string { return fmt.Sprintf("%T%s", n, n.c.String()) }
|
||||
func (n LABtosRGB) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
|
||||
type UniformFunctionTransformer struct {
|
||||
name string
|
||||
f func(unit_float) unit_float
|
||||
}
|
||||
|
||||
func (n UniformFunctionTransformer) IOSig() (int, int) { return 3, 3 }
|
||||
func (n UniformFunctionTransformer) String() string { return n.name }
|
||||
func (n *UniformFunctionTransformer) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
func (c *UniformFunctionTransformer) Transform(x, y, z unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.f(x), c.f(y), c.f(z)
|
||||
}
|
||||
func (c *UniformFunctionTransformer) TransformGeneral(o, i []unit_float) {
|
||||
for k, x := range i {
|
||||
o[k] = c.f(x)
|
||||
}
|
||||
}
|
||||
func NewUniformFunctionTransformer(name string, f func(unit_float) unit_float) *UniformFunctionTransformer {
|
||||
return &UniformFunctionTransformer{name, f}
|
||||
}
|
||||
|
||||
type XYZtosRGB struct {
|
||||
c *colorconv.ConvertColor
|
||||
t func(l, a, b unit_float) (x, y, z unit_float)
|
||||
}
|
||||
|
||||
func NewXYZtosRGB(whitepoint XYZType, clamp, map_gamut bool) *XYZtosRGB {
|
||||
c := colorconv.NewConvertColor(whitepoint.X, whitepoint.Y, whitepoint.Z, 1)
|
||||
if clamp {
|
||||
if map_gamut {
|
||||
return &XYZtosRGB{c, c.XYZToSRGB}
|
||||
}
|
||||
return &XYZtosRGB{c, c.XYZToSRGBNoGamutMap}
|
||||
}
|
||||
return &XYZtosRGB{c, c.XYZToSRGBNoClamp}
|
||||
}
|
||||
|
||||
func (n *XYZtosRGB) AddPreviousMatrix(m Matrix3) {
|
||||
n.c.AddPreviousMatrix(m[0], m[1], m[2])
|
||||
}
|
||||
|
||||
func (c *XYZtosRGB) Transform(l, a, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.t(l, a, b)
|
||||
}
|
||||
func (m *XYZtosRGB) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
func (n *XYZtosRGB) IOSig() (int, int) { return 3, 3 }
|
||||
func (n *XYZtosRGB) String() string { return fmt.Sprintf("%T%s", n, n.c.String()) }
|
||||
func (n *XYZtosRGB) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
|
||||
type LABtoXYZ struct {
|
||||
c *colorconv.ConvertColor
|
||||
t func(l, a, b unit_float) (x, y, z unit_float)
|
||||
}
|
||||
|
||||
func NewLABtoXYZ(whitepoint XYZType) *LABtoXYZ {
|
||||
c := colorconv.NewConvertColor(whitepoint.X, whitepoint.Y, whitepoint.Z, 1)
|
||||
return &LABtoXYZ{c, c.LabToXYZ}
|
||||
}
|
||||
|
||||
func (c *LABtoXYZ) Transform(l, a, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.t(l, a, b)
|
||||
}
|
||||
func (m *LABtoXYZ) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
func (n *LABtoXYZ) IOSig() (int, int) { return 3, 3 }
|
||||
func (n *LABtoXYZ) String() string { return fmt.Sprintf("%T%s", n, n.c.String()) }
|
||||
func (n *LABtoXYZ) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
|
||||
type XYZtoLAB struct {
|
||||
c *colorconv.ConvertColor
|
||||
t func(l, a, b unit_float) (x, y, z unit_float)
|
||||
}
|
||||
|
||||
func NewXYZtoLAB(whitepoint XYZType) *XYZtoLAB {
|
||||
c := colorconv.NewConvertColor(whitepoint.X, whitepoint.Y, whitepoint.Z, 1)
|
||||
return &XYZtoLAB{c, c.XYZToLab}
|
||||
}
|
||||
|
||||
func (c *XYZtoLAB) Transform(l, a, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.t(l, a, b)
|
||||
}
|
||||
func (m *XYZtoLAB) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
func (n *XYZtoLAB) IOSig() (int, int) { return 3, 3 }
|
||||
func (n *XYZtoLAB) String() string { return fmt.Sprintf("%T%s", n, n.c.String()) }
|
||||
func (n *XYZtoLAB) Iter(f func(ChannelTransformer) bool) { f(n) }
|
||||
253
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/pipeline.go
generated
vendored
Normal file
@@ -0,0 +1,253 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type Pipeline struct {
|
||||
transformers []ChannelTransformer
|
||||
tfuncs []func(r, g, b unit_float) (unit_float, unit_float, unit_float)
|
||||
has_lut16type_tag bool
|
||||
finalized bool
|
||||
}
|
||||
|
||||
type AsMatrix3 interface {
|
||||
AsMatrix3() *Matrix3
|
||||
}
|
||||
|
||||
// check for interface being nil or the dynamic value it points to being nil
|
||||
func is_nil(i any) bool {
|
||||
if i == nil {
|
||||
return true // interface itself is nil
|
||||
}
|
||||
v := reflect.ValueOf(i)
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice:
|
||||
return v.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) finalize(optimize bool) {
|
||||
if p.finalized {
|
||||
panic("pipeline already finalized")
|
||||
}
|
||||
p.finalized = true
|
||||
if optimize && len(p.transformers) > 1 {
|
||||
// Combine all neighboring Matrix3 transformers into a single transformer by multiplying the matrices
|
||||
var pm AsMatrix3
|
||||
nt := make([]ChannelTransformer, 0, len(p.transformers))
|
||||
for i := 0; i < len(p.transformers); {
|
||||
t := p.transformers[i]
|
||||
if tm, ok := t.(AsMatrix3); ok {
|
||||
for i+1 < len(p.transformers) {
|
||||
if pm, ok = p.transformers[i+1].(AsMatrix3); !ok {
|
||||
break
|
||||
}
|
||||
a, b := tm.AsMatrix3(), pm.AsMatrix3()
|
||||
combined := b.Multiply(*a)
|
||||
tm = &combined
|
||||
t = &combined
|
||||
i++
|
||||
}
|
||||
}
|
||||
nt = append(nt, t)
|
||||
i++
|
||||
}
|
||||
p.transformers = nt
|
||||
// Check if the last transform can absorb previous matrices
|
||||
if len(p.transformers) > 1 {
|
||||
last := p.transformers[len(p.transformers)-1]
|
||||
if apm, ok := last.(*XYZtosRGB); ok {
|
||||
p.transformers = p.transformers[:len(p.transformers)-1]
|
||||
for {
|
||||
m := p.remove_last_matrix3()
|
||||
if m == nil {
|
||||
break
|
||||
}
|
||||
apm.AddPreviousMatrix(*m)
|
||||
}
|
||||
p.transformers = append(p.transformers, last)
|
||||
}
|
||||
}
|
||||
}
|
||||
p.tfuncs = make([]func(r unit_float, g unit_float, b unit_float) (unit_float, unit_float, unit_float), len(p.transformers))
|
||||
for i, t := range p.transformers {
|
||||
p.tfuncs[i] = t.Transform
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) Finalize(optimize bool) { p.finalize(optimize) }
|
||||
|
||||
func (p *Pipeline) insert(idx int, c ChannelTransformer) {
|
||||
if is_nil(c) {
|
||||
return
|
||||
}
|
||||
switch c.(type) {
|
||||
case *IdentityMatrix:
|
||||
return
|
||||
}
|
||||
if len(p.transformers) == 0 {
|
||||
p.transformers = append(p.transformers, c)
|
||||
return
|
||||
}
|
||||
if idx >= len(p.transformers) {
|
||||
panic(fmt.Sprintf("cannot insert at idx: %d in pipeline of length: %d", idx, len(p.transformers)))
|
||||
}
|
||||
prepend := idx > -1
|
||||
if prepend {
|
||||
p.transformers = slices.Insert(p.transformers, idx, c)
|
||||
} else {
|
||||
p.transformers = append(p.transformers, c)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) Insert(idx int, c ChannelTransformer) {
|
||||
s := slices.Collect(c.Iter)
|
||||
if idx > -1 {
|
||||
slices.Reverse(s)
|
||||
}
|
||||
for _, x := range s {
|
||||
p.insert(idx, x)
|
||||
}
|
||||
if mft, ok := c.(*MFT); ok && !mft.is8bit {
|
||||
p.has_lut16type_tag = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) Append(c ...ChannelTransformer) {
|
||||
for _, x := range c {
|
||||
p.Insert(-1, x)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) remove_last_matrix3() *Matrix3 {
|
||||
if len(p.transformers) > 0 {
|
||||
if q, ok := p.transformers[len(p.transformers)-1].(AsMatrix3); ok {
|
||||
p.transformers = p.transformers[:len(p.transformers)-1]
|
||||
return q.AsMatrix3()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
for _, t := range p.tfuncs {
|
||||
r, g, b = t(r, g, b)
|
||||
}
|
||||
return r, g, b
|
||||
}
|
||||
|
||||
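// Illustrative usage sketch, not part of the vendored diff (it assumes these
// transformers satisfy ChannelTransformer, as their methods above suggest, and
// that x, y, z are placeholder inputs):
//
//	p := &Pipeline{}
//	p.Append(NewXYZToNormalized(), NewNormalizedToLAB())
//	p.Finalize(true)
//	l, a, b := p.Transform(x, y, z)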
func (p *Pipeline) TransformDebug(r, g, b unit_float, f Debug_callback) (unit_float, unit_float, unit_float) {
|
||||
for _, t := range p.transformers {
|
||||
x, y, z := t.Transform(r, g, b)
|
||||
f(r, g, b, x, y, z, t)
|
||||
r, g, b = x, y, z
|
||||
}
|
||||
return r, g, b
|
||||
}
|
||||
|
||||
func (p *Pipeline) TransformGeneral(out, in []unit_float) {
|
||||
for _, t := range p.transformers {
|
||||
t.TransformGeneral(out, in)
|
||||
copy(in, out)
|
||||
}
|
||||
}
|
||||
|
||||
type General_debug_callback = func(in, out []unit_float, t ChannelTransformer)
|
||||
|
||||
func (p *Pipeline) TransformGeneralDebug(out, in []unit_float, f General_debug_callback) {
|
||||
for _, t := range p.transformers {
|
||||
t.TransformGeneral(out, in)
|
||||
nin, nout := t.IOSig()
|
||||
f(in[:nin], out[:nout], t)
|
||||
copy(in, out)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) Len() int { return len(p.transformers) }
|
||||
|
||||
func (p *Pipeline) Weld(other *Pipeline, optimize bool) (ans *Pipeline) {
|
||||
ans = &Pipeline{}
|
||||
ans.transformers = append(ans.transformers, p.transformers...)
|
||||
ans.transformers = append(ans.transformers, other.transformers...)
|
||||
ans.finalize(true)
|
||||
ans.has_lut16type_tag = p.has_lut16type_tag || other.has_lut16type_tag
|
||||
return ans
|
||||
}
|
||||
|
||||
func transformers_as_string(t ...ChannelTransformer) string {
|
||||
items := make([]string, len(t))
|
||||
for i, t := range t {
|
||||
items[i] = t.String()
|
||||
}
|
||||
return strings.Join(items, " → ")
|
||||
}
|
||||
|
||||
func (p *Pipeline) String() string {
|
||||
return transformers_as_string(p.transformers...)
|
||||
}
|
||||
|
||||
func (p *Pipeline) IOSig() (i int, o int) {
|
||||
if len(p.transformers) == 0 {
|
||||
return -1, -1
|
||||
}
|
||||
i, _ = p.transformers[0].IOSig()
|
||||
_, o = p.transformers[len(p.transformers)-1].IOSig()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Pipeline) IsSuitableFor(i, o int) bool {
|
||||
for _, t := range p.transformers {
|
||||
qi, qo := t.IOSig()
|
||||
if qi != i {
|
||||
return false
|
||||
}
|
||||
i = qo
|
||||
}
|
||||
return i == o
|
||||
}
|
||||
|
||||
func (p *Pipeline) UseTrilinearInsteadOfTetrahedral() {
|
||||
for i, q := range p.transformers {
|
||||
if x, ok := q.(*TetrahedralInterpolate); ok {
|
||||
p.transformers[i] = &TrilinearInterpolate{x.d, x.legacy}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) IsXYZSRGB() bool {
|
||||
if p.Len() == 2 {
|
||||
if c, ok := p.transformers[0].(Curves); ok {
|
||||
is_srgb := true
|
||||
for _, cc := range c.Curves() {
|
||||
if q, ok := cc.(IsSRGB); ok {
|
||||
is_srgb = q.IsSRGB()
|
||||
} else {
|
||||
is_srgb = false
|
||||
}
|
||||
if !is_srgb {
|
||||
break
|
||||
}
|
||||
}
|
||||
if is_srgb {
|
||||
if c, ok := p.transformers[1].(AsMatrix3); ok {
|
||||
q := c.AsMatrix3()
|
||||
var expected_matrix = Matrix3{{0.218036, 0.192576, 0.0715343}, {0.111246, 0.358442, 0.0303044}, {0.00695811, 0.0485389, 0.357053}}
|
||||
// unfortunately there exist profiles in the wild that
|
||||
// deviate from the expected matrix by more than FLOAT_EQUALITY_THRESHOLD
|
||||
if q.Equals(&expected_matrix, 8.5*FLOAT_EQUALITY_THRESHOLD) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
439
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/profile.go
generated
vendored
@@ -1,7 +1,21 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var _ = fmt.Println
|
||||
|
||||
type WellKnownProfile int
|
||||
|
||||
//go:embed test-profiles/sRGB-v4.icc
|
||||
var Srgb_xyz_profile_data []byte
|
||||
|
||||
const (
|
||||
UnknownProfile WellKnownProfile = iota
|
||||
SRGBProfile
|
||||
@@ -10,39 +24,11 @@ const (
|
||||
DisplayP3Profile
|
||||
)
|
||||
|
||||
func WellKnownProfileFromDescription(x string) WellKnownProfile {
|
||||
switch x {
|
||||
case "sRGB IEC61966-2.1", "sRGB_ICC_v4_Appearance.icc":
|
||||
return SRGBProfile
|
||||
case "Adobe RGB (1998)":
|
||||
return AdobeRGBProfile
|
||||
case "Display P3":
|
||||
return DisplayP3Profile
|
||||
case "ProPhoto RGB":
|
||||
return PhotoProProfile
|
||||
default:
|
||||
return UnknownProfile
|
||||
}
|
||||
}
|
||||
|
||||
func (p WellKnownProfile) String() string {
|
||||
switch p {
|
||||
case SRGBProfile:
|
||||
return "sRGB IEC61966-2.1"
|
||||
case AdobeRGBProfile:
|
||||
return "Adobe RGB (1998)"
|
||||
case PhotoProProfile:
|
||||
return "ProPhoto RGB"
|
||||
case DisplayP3Profile:
|
||||
return "Display P3"
|
||||
default:
|
||||
return "Unknown Profile"
|
||||
}
|
||||
}
|
||||
|
||||
type Profile struct {
|
||||
Header Header
|
||||
TagTable TagTable
|
||||
Header Header
|
||||
TagTable TagTable
|
||||
PCSIlluminant XYZType
|
||||
blackpoints map[RenderingIntent]*XYZType
|
||||
}
|
||||
|
||||
func (p *Profile) Description() (string, error) {
|
||||
@@ -57,44 +43,371 @@ func (p *Profile) DeviceModelDescription() (string, error) {
|
||||
return p.TagTable.getDeviceModelDescription()
|
||||
}
|
||||
|
||||
func (p *Profile) WellKnownProfile() WellKnownProfile {
|
||||
model, err := p.DeviceModelDescription()
|
||||
func (p *Profile) get_effective_chromatic_adaption(forward bool, intent RenderingIntent) (ans *Matrix3, err error) {
|
||||
if intent != AbsoluteColorimetricRenderingIntent { // ComputeConversion() in lcms
|
||||
return nil, nil
|
||||
}
|
||||
pcs_whitepoint := p.Header.ParsedPCSIlluminant()
|
||||
x, err := p.TagTable.get_parsed(MediaWhitePointTagSignature, p.Header.DataColorSpace, p.Header.ProfileConnectionSpace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wtpt, ok := x.(*XYZType)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("wtpt tag is not of XYZType")
|
||||
}
|
||||
if pcs_whitepoint == *wtpt {
|
||||
return nil, nil
|
||||
}
|
||||
defer func() {
|
||||
if err == nil && ans != nil && !forward {
|
||||
m, ierr := ans.Inverted()
|
||||
if ierr == nil {
|
||||
ans = &m
|
||||
} else {
|
||||
ans, err = nil, ierr
|
||||
}
|
||||
}
|
||||
}()
|
||||
return p.TagTable.get_chromatic_adaption()
|
||||
}
|
||||
|
||||
func (p *Profile) create_matrix_trc_transformer(forward bool, chromatic_adaptation *Matrix3, pipeline *Pipeline) (err error) {
|
||||
if p.Header.ProfileConnectionSpace != ColorSpaceXYZ {
|
||||
return fmt.Errorf("matrix/TRC based profile using non XYZ PCS color space: %v", p.Header.ProfileConnectionSpace)
|
||||
}
|
||||
// See section F.3 of ICC.1-2022-5.pdf for how these transforms are composed
|
||||
var rc, gc, bc Curve1D
|
||||
if rc, err = p.TagTable.load_curve_tag(RedTRCTagSignature); err != nil {
|
||||
return err
|
||||
}
|
||||
if gc, err = p.TagTable.load_curve_tag(GreenTRCTagSignature); err != nil {
|
||||
return err
|
||||
}
|
||||
if bc, err = p.TagTable.load_curve_tag(BlueTRCTagSignature); err != nil {
|
||||
return err
|
||||
}
|
||||
m, err := p.TagTable.load_rgb_matrix(forward)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var c Curves
|
||||
if forward {
|
||||
c = NewCurveTransformer("TRC", rc, gc, bc)
|
||||
} else {
|
||||
c = NewInverseCurveTransformer("TRC", rc, gc, bc)
|
||||
}
|
||||
if forward {
|
||||
pipeline.Append(c, m, chromatic_adaptation)
|
||||
} else {
|
||||
pipeline.Append(chromatic_adaptation, m, NewInverseCurveTransformer("TRC", rc, gc, bc))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// See section 8.10.2 of ICC.1-2022-05.pdf for the tag selection algorithm
|
||||
func (p *Profile) find_conversion_tag(forward bool, rendering_intent RenderingIntent) (ans ChannelTransformer, err error) {
|
||||
var ans_sig Signature = UnknownSignature
|
||||
found_tag := false
|
||||
if forward {
|
||||
switch rendering_intent {
|
||||
case PerceptualRenderingIntent:
|
||||
ans_sig = AToB0TagSignature
|
||||
case RelativeColorimetricRenderingIntent:
|
||||
ans_sig = AToB1TagSignature
|
||||
case SaturationRenderingIntent:
|
||||
ans_sig = AToB2TagSignature
|
||||
case AbsoluteColorimetricRenderingIntent:
|
||||
ans_sig = AToB3TagSignature
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown rendering intent: %v", rendering_intent)
|
||||
}
|
||||
found_tag = p.TagTable.Has(ans_sig)
|
||||
const fallback = AToB0TagSignature
|
||||
if !found_tag && p.TagTable.Has(fallback) {
|
||||
ans_sig = fallback
|
||||
found_tag = true
|
||||
}
|
||||
} else {
|
||||
switch rendering_intent {
|
||||
case PerceptualRenderingIntent:
|
||||
ans_sig = BToA0TagSignature
|
||||
case RelativeColorimetricRenderingIntent:
|
||||
ans_sig = BToA1TagSignature
|
||||
case SaturationRenderingIntent:
|
||||
ans_sig = BToA2TagSignature
|
||||
case AbsoluteColorimetricRenderingIntent:
|
||||
ans_sig = BToA3TagSignature
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown rendering intent: %v", rendering_intent)
|
||||
}
|
||||
found_tag = p.TagTable.Has(ans_sig)
|
||||
const fallback = BToA0TagSignature
|
||||
if !found_tag && p.TagTable.Has(fallback) {
|
||||
ans_sig = fallback
|
||||
found_tag = true
|
||||
}
|
||||
}
|
||||
if !found_tag {
|
||||
return nil, nil
|
||||
}
|
||||
// We rely on profile reader to error out if the PCS color space is not XYZ
|
||||
// or LAB and the device colorspace is not RGB or CMYK
|
||||
input_colorspace, output_colorspace := p.Header.DataColorSpace, p.Header.ProfileConnectionSpace
|
||||
if !forward {
|
||||
input_colorspace, output_colorspace = output_colorspace, input_colorspace
|
||||
}
|
||||
c, err := p.TagTable.get_parsed(ans_sig, input_colorspace, output_colorspace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans, ok := c.(ChannelTransformer)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s tag is not a ChannelTransformer: %T", ans_sig, c)
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
func (p *Profile) effective_bpc(intent RenderingIntent, user_requested_bpc bool) bool {
|
||||
// See _cmsLinkProfiles() in cmscnvrt.c
|
||||
if intent == AbsoluteColorimetricRenderingIntent {
|
||||
return false
|
||||
}
|
||||
if (intent == PerceptualRenderingIntent || intent == SaturationRenderingIntent) && p.Header.Version.Major >= 4 {
|
||||
return true
|
||||
}
|
||||
return user_requested_bpc
|
||||
}
|
||||
|
||||
func (p *Profile) CreateTransformerToDevice(rendering_intent RenderingIntent, use_blackpoint_compensation, optimize bool) (ans *Pipeline, err error) {
|
||||
num_output_channels := len(p.Header.DataColorSpace.BlackPoint())
|
||||
if num_output_channels == 0 {
|
||||
return nil, fmt.Errorf("unsupported device color space: %s", p.Header.DataColorSpace)
|
||||
}
|
||||
defer func() {
|
||||
if err == nil && !ans.IsSuitableFor(3, num_output_channels) {
|
||||
err = fmt.Errorf("transformer to PCS %s not suitable for 3 output channels", ans.String())
|
||||
}
|
||||
if err == nil {
|
||||
ans.finalize(optimize)
|
||||
}
|
||||
}()
|
||||
ans = &Pipeline{}
|
||||
|
||||
if p.effective_bpc(rendering_intent, use_blackpoint_compensation) {
|
||||
var PCS_blackpoint XYZType // 0, 0, 0
|
||||
output_blackpoint := p.BlackPoint(rendering_intent, nil)
|
||||
if PCS_blackpoint != output_blackpoint {
|
||||
is_lab := p.Header.ProfileConnectionSpace == ColorSpaceLab
|
||||
if is_lab {
|
||||
ans.Append(NewLABtoXYZ(p.PCSIlluminant))
|
||||
ans.Append(NewXYZToNormalized())
|
||||
}
|
||||
ans.Append(NewBlackPointCorrection(p.PCSIlluminant, PCS_blackpoint, output_blackpoint))
|
||||
if is_lab {
|
||||
ans.Append(NewNormalizedToXYZ())
|
||||
ans.Append(NewXYZtoLAB(p.PCSIlluminant))
|
||||
}
|
||||
}
|
||||
}
|
||||
ans.Append(transform_for_pcs_colorspace(p.Header.ProfileConnectionSpace, false))
|
||||
|
||||
const forward = false
|
||||
b2a, err := p.find_conversion_tag(forward, rendering_intent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chromatic_adaptation, err := p.get_effective_chromatic_adaption(forward, rendering_intent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b2a != nil {
|
||||
ans.Append(b2a)
|
||||
ans.Append(chromatic_adaptation)
|
||||
if p.Header.ProfileConnectionSpace == ColorSpaceLab {
|
||||
// For some reason, lcms prefers trilinear over tetrahedral in this
|
||||
// case, see _cmsReadOutputLUT() in cmsio1.c
|
||||
ans.UseTrilinearInsteadOfTetrahedral()
|
||||
}
|
||||
} else {
|
||||
err = p.create_matrix_trc_transformer(forward, chromatic_adaptation, ans)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Profile) createTransformerToPCS(rendering_intent RenderingIntent) (ans *Pipeline, err error) {
|
||||
const forward = true
|
||||
ans = &Pipeline{}
|
||||
a2b, err := p.find_conversion_tag(forward, rendering_intent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chromatic_adaptation, err := p.get_effective_chromatic_adaption(forward, rendering_intent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if a2b != nil {
|
||||
ans.Append(a2b)
|
||||
ans.Append(chromatic_adaptation)
|
||||
if ans.has_lut16type_tag && p.Header.ProfileConnectionSpace == ColorSpaceLab {
|
||||
// Need to scale the lut16type data for legacy LAB encoding in ICC profiles
|
||||
if p.Header.DataColorSpace == ColorSpaceLab {
|
||||
ans.Insert(0, NewLABToMFT2())
|
||||
}
|
||||
ans.Append(NewLABFromMFT2())
|
||||
}
|
||||
} else {
|
||||
err = p.create_matrix_trc_transformer(forward, chromatic_adaptation, ans)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Profile) IsSRGB() bool {
|
||||
if p.Header.ProfileConnectionSpace == ColorSpaceXYZ {
|
||||
tr, err := p.createTransformerToPCS(p.Header.RenderingIntent)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
tr.finalize(true)
|
||||
return tr.IsXYZSRGB()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func transform_for_pcs_colorspace(cs ColorSpace, forward bool) ChannelTransformer {
|
||||
switch cs {
|
||||
case ColorSpaceXYZ:
|
||||
if forward {
|
||||
return NewNormalizedToXYZ()
|
||||
}
|
||||
return NewXYZToNormalized()
|
||||
case ColorSpaceLab:
|
||||
if forward {
|
||||
return NewNormalizedToLAB()
|
||||
}
|
||||
return NewLABToNormalized()
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported PCS colorspace in profile: %s", cs))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Profile) CreateTransformerToPCS(rendering_intent RenderingIntent, input_channels int, optimize bool) (ans *Pipeline, err error) {
|
||||
ans, err = p.createTransformerToPCS(rendering_intent)
|
||||
if err == nil && !ans.IsSuitableFor(input_channels, 3) {
|
||||
err = fmt.Errorf("transformer to PCS %s not suitable for %d input channels", ans.String(), input_channels)
|
||||
}
|
||||
if err == nil {
|
||||
switch model {
|
||||
case "IEC 61966-2-1 Default RGB Colour Space - sRGB":
|
||||
return SRGBProfile
|
||||
ans.Append(transform_for_pcs_colorspace(p.Header.ProfileConnectionSpace, true))
|
||||
ans.finalize(optimize)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Profile) CreateTransformerToSRGB(rendering_intent RenderingIntent, use_blackpoint_compensation bool, input_channels int, clamp, map_gamut, optimize bool) (ans *Pipeline, err error) {
|
||||
if ans, err = p.createTransformerToPCS(rendering_intent); err != nil {
|
||||
return
|
||||
}
|
||||
if !ans.IsSuitableFor(input_channels, 3) {
|
||||
return nil, fmt.Errorf("transformer to PCS %s not suitable for %d input channels", ans.String(), input_channels)
|
||||
}
|
||||
input_colorspace := p.Header.ProfileConnectionSpace
|
||||
if p.effective_bpc(rendering_intent, use_blackpoint_compensation) {
|
||||
var sRGB_blackpoint XYZType // 0, 0, 0
|
||||
input_blackpoint := p.BlackPoint(rendering_intent, nil)
|
||||
if input_blackpoint != sRGB_blackpoint {
|
||||
if input_colorspace == ColorSpaceLab {
|
||||
ans.Append(transform_for_pcs_colorspace(input_colorspace, true))
|
||||
ans.Append(NewLABtoXYZ(p.PCSIlluminant))
|
||||
ans.Append(NewXYZToNormalized())
|
||||
input_colorspace = ColorSpaceXYZ
|
||||
}
|
||||
ans.Append(NewBlackPointCorrection(p.PCSIlluminant, input_blackpoint, sRGB_blackpoint))
|
||||
}
|
||||
}
|
||||
d, err := p.Description()
|
||||
if err == nil {
|
||||
if ans := WellKnownProfileFromDescription(d); ans != UnknownProfile {
|
||||
return ans
|
||||
}
|
||||
ans.Append(transform_for_pcs_colorspace(input_colorspace, true))
|
||||
switch input_colorspace {
|
||||
case ColorSpaceXYZ:
|
||||
t := NewXYZtosRGB(p.PCSIlluminant, clamp, map_gamut)
|
||||
ans.Append(t)
|
||||
case ColorSpaceLab:
|
||||
ans.Append(NewLABtosRGB(p.PCSIlluminant, clamp, map_gamut))
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown PCS colorspace: %s", input_colorspace)
|
||||
}
|
||||
switch p.Header.DeviceManufacturer {
|
||||
case IECManufacturerSignature:
|
||||
switch p.Header.DeviceModel {
|
||||
case SRGBModelSignature:
|
||||
return SRGBProfile
|
||||
}
|
||||
case AdobeManufacturerSignature:
|
||||
switch p.Header.DeviceModel {
|
||||
case AdobeRGBModelSignature:
|
||||
return AdobeRGBProfile
|
||||
case PhotoProModelSignature:
|
||||
return PhotoProProfile
|
||||
}
|
||||
case AppleManufacturerSignature, AppleUpperManufacturerSignature:
|
||||
switch p.Header.DeviceModel {
|
||||
case DisplayP3ModelSignature:
|
||||
return DisplayP3Profile
|
||||
}
|
||||
}
|
||||
return UnknownProfile
|
||||
ans.finalize(optimize)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Profile) CreateDefaultTransformerToDevice() (*Pipeline, error) {
|
||||
return p.CreateTransformerToDevice(p.Header.RenderingIntent, false, true)
|
||||
}
|
||||
|
||||
func (p *Profile) CreateDefaultTransformerToPCS(input_channels int) (*Pipeline, error) {
|
||||
return p.CreateTransformerToPCS(p.Header.RenderingIntent, input_channels, true)
|
||||
}
|
||||
|
||||
func newProfile() *Profile {
|
||||
return &Profile{
|
||||
TagTable: emptyTagTable(),
|
||||
TagTable: emptyTagTable(),
|
||||
blackpoints: make(map[RenderingIntent]*XYZType),
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively generates all points in an m-dimensional hypercube.
|
||||
// currentPoint stores the coordinates of the current point being built.
|
||||
// dimension is the current dimension being processed (from 0 to m-1).
|
||||
// m is the total number of dimensions.
|
||||
// n is the number of points per dimension (0 to n-1).
|
||||
func iterate_hypercube(currentPoint []int, dimension, m, n int, callback func([]int)) {
|
||||
// Base case: if all dimensions have been assigned, invoke the callback with the point.
|
||||
if dimension == m {
|
||||
callback(currentPoint)
|
||||
return
|
||||
}
|
||||
|
||||
// Recursive step: Iterate through all possible values for the current dimension.
|
||||
for i := range n {
|
||||
currentPoint[dimension] = i // Assign value to the current dimension
|
||||
// Recursively call for the next dimension
|
||||
iterate_hypercube(currentPoint, dimension+1, m, n, callback)
|
||||
}
|
||||
}
|
||||
|
||||
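// Illustrative example, not part of the vendored diff: with m = 2 dimensions
// and n = 3 points per dimension, iterate_hypercube invokes the callback
// 3^2 = 9 times, with currentPoint taking (0,0), (0,1), (0,2), (1,0), ...,
// (2,2) in lexicographic order (the last dimension varies fastest).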
func points_for_transformer_comparison(input_channels, num_points_per_input_channel int) []unit_float {
|
||||
m, n := input_channels, num_points_per_input_channel
|
||||
sz := input_channels // n ** m * m
|
||||
for range m {
|
||||
sz *= n
|
||||
}
|
||||
ans := make([]unit_float, 0, sz)
|
||||
current_point := make([]int, input_channels)
|
||||
factor := 1 / unit_float(num_points_per_input_channel-1)
|
||||
iterate_hypercube(current_point, 0, m, n, func(p []int) {
|
||||
for _, x := range current_point {
|
||||
ans = append(ans, unit_float(x)*factor)
|
||||
}
|
||||
})
|
||||
if len(ans) != sz {
|
||||
panic(fmt.Sprintf("insufficient points: wanted %d, got %d", sz, len(ans)))
|
||||
}
|
||||
return ans
|
||||
}
|
||||
|
||||
var Points_for_transformer_comparison3 = sync.OnceValue(func() []unit_float {
|
||||
return points_for_transformer_comparison(3, 16)
|
||||
})
|
||||
var Points_for_transformer_comparison4 = sync.OnceValue(func() []unit_float {
|
||||
return points_for_transformer_comparison(4, 16)
|
||||
})
|
||||
|
||||
func DecodeProfile(r io.Reader) (ans *Profile, err error) {
|
||||
return NewProfileReader(r).ReadProfile()
|
||||
}
|
||||
|
||||
func ReadProfile(path string) (ans *Profile, err error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
ans, err = NewProfileReader(bytes.NewReader(data)).ReadProfile()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
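// Illustrative usage sketch, not part of the vendored diff (the path is made
// up):
//
//	if p, err := ReadProfile("/path/to/display.icc"); err == nil {
//		fmt.Println(p.Header, "sRGB:", p.IsSRGB())
//	}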
11
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/profilereader.go
generated
vendored
@@ -30,6 +30,7 @@ func (pr *ProfileReader) ReadProfile() (p *Profile, err error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read header from ICC profile: %w", err)
|
||||
}
|
||||
profile.PCSIlluminant = profile.Header.ParsedPCSIlluminant()
|
||||
|
||||
err = pr.readTagTable(&profile.TagTable)
|
||||
if err != nil {
|
||||
@@ -51,6 +52,12 @@ func (pr *ProfileReader) readHeader(header *Header) (err error) {
|
||||
if n != len(data) {
|
||||
return fmt.Errorf("decoding header consumed %d instead of %d bytes", n, len(data))
|
||||
}
|
||||
if header.ProfileConnectionSpace != ColorSpaceXYZ && header.ProfileConnectionSpace != ColorSpaceLab {
|
||||
return fmt.Errorf("unsupported profile connection space colorspace: %s", header.ProfileConnectionSpace)
|
||||
}
|
||||
if header.DataColorSpace != ColorSpaceRGB && header.DataColorSpace != ColorSpaceCMYK {
|
||||
return fmt.Errorf("unsupported device colorspace: %s", header.DataColorSpace)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -62,7 +69,7 @@ func (pr *ProfileReader) readTagTable(tagTable *TagTable) (err error) {
|
||||
return
|
||||
}
|
||||
type tagIndexEntry struct {
|
||||
Sig uint32
|
||||
Sig Signature
|
||||
Offset uint32
|
||||
Size uint32
|
||||
}
|
||||
@@ -83,7 +90,7 @@ func (pr *ProfileReader) readTagTable(tagTable *TagTable) (err error) {
|
||||
for _, t := range tag_indices {
|
||||
startOffset := t.Offset - tagDataOffset
|
||||
endOffset := startOffset + t.Size
|
||||
tagTable.add(Signature(t.Sig), tagData[startOffset:endOffset])
|
||||
tagTable.add(t.Sig, int(startOffset), tagData[startOffset:endOffset])
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
4
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/renderingintent.go
generated
vendored
@@ -16,11 +16,11 @@ func (ri RenderingIntent) String() string {
|
||||
case PerceptualRenderingIntent:
|
||||
return "Perceptual"
|
||||
case RelativeColorimetricRenderingIntent:
|
||||
return "Relative colorimetric"
|
||||
return "Relative"
|
||||
case SaturationRenderingIntent:
|
||||
return "Saturation"
|
||||
case AbsoluteColorimetricRenderingIntent:
|
||||
return "Absolute colorimetric"
|
||||
return "Absolute"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown (%d)", ri)
|
||||
}
|
||||
|
||||
3
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/signature.go
generated
vendored
@@ -3,6 +3,7 @@ package icc
|
||||
type Signature uint32
|
||||
|
||||
const (
|
||||
UnknownSignature Signature = 0
|
||||
ProfileFileSignature Signature = 0x61637370 // 'acsp'
|
||||
TextTagSignature Signature = 0x74657874 // 'text'
|
||||
SignateTagSignature Signature = 0x73696720 // 'sig '
|
||||
@@ -105,12 +106,14 @@ const (
|
||||
AToB0TagSignature Signature = 0x41324230 /* 'A2B0' */
|
||||
AToB1TagSignature Signature = 0x41324231 /* 'A2B1' */
|
||||
AToB2TagSignature Signature = 0x41324232 /* 'A2B2' */
|
||||
AToB3TagSignature Signature = 0x41324233 /* 'A2B3' */
|
||||
BlueColorantTagSignature Signature = 0x6258595A /* 'bXYZ' */
|
||||
BlueMatrixColumnTagSignature Signature = 0x6258595A /* 'bXYZ' */
|
||||
BlueTRCTagSignature Signature = 0x62545243 /* 'bTRC' */
|
||||
BToA0TagSignature Signature = 0x42324130 /* 'B2A0' */
|
||||
BToA1TagSignature Signature = 0x42324131 /* 'B2A1' */
|
||||
BToA2TagSignature Signature = 0x42324132 /* 'B2A2' */
|
||||
BToA3TagSignature Signature = 0x42324133 /* 'B2A3' */
|
||||
CalibrationDateTimeTagSignature Signature = 0x63616C74 /* 'calt' */
|
||||
CharTargetTagSignature Signature = 0x74617267 /* 'targ' */
|
||||
ChromaticAdaptationTagSignature Signature = 0x63686164 /* 'chad' */
|
||||
|
||||
2
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/tag_description.go
generated
vendored
@@ -147,7 +147,7 @@ func mlucDecoder(raw []byte) (any, error) {
|
||||
return nil, fmt.Errorf("mluc tag too small for %d records", count)
|
||||
}
|
||||
tag := &MultiLocalizedTag{Strings: make([]LocalizedString, 0, count)}
|
||||
for i := 0; i < count; i++ {
|
||||
for i := range count {
|
||||
base := 16 + i*recordSize
|
||||
langCode := string(raw[base : base+2])
|
||||
countryCode := string(raw[base+2 : base+4])
|
||||
|
||||
219
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/tags_clut.go
generated
vendored
@@ -4,57 +4,112 @@ import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
// CLUTTag represents a color lookup table tag (TagColorLookupTable)
|
||||
type CLUTTag struct {
|
||||
GridPoints []uint8 // e.g., [17,17,17] for 3D CLUT
|
||||
InputChannels int
|
||||
OutputChannels int
|
||||
Values []float64 // flattened [in1, in2, ..., out1, out2, ...]
|
||||
// TrilinearInterpolate represents a color lookup table tag (TagColorLookupTable)
|
||||
type TrilinearInterpolate struct {
|
||||
d *interpolation_data
|
||||
legacy bool
|
||||
}
|
||||
|
||||
var _ ChannelTransformer = (*CLUTTag)(nil)
|
||||
type TetrahedralInterpolate struct {
|
||||
d *interpolation_data
|
||||
legacy bool
|
||||
}
|
||||
|
||||
type CLUT interface {
|
||||
ChannelTransformer
|
||||
Samples() []unit_float
|
||||
}
|
||||
|
||||
func (c *TrilinearInterpolate) Samples() []unit_float { return c.d.samples }
|
||||
func (c *TetrahedralInterpolate) Samples() []unit_float { return c.d.samples }
|
||||
|
||||
func (c TetrahedralInterpolate) String() string {
|
||||
return fmt.Sprintf("TetrahedralInterpolate{ inp:%v outp:%v grid:%v values[:9]:%v }", c.d.num_inputs, c.d.num_outputs, c.d.grid_points, c.d.samples[:min(9, len(c.d.samples))])
|
||||
}
|
||||
|
||||
func (c TrilinearInterpolate) String() string {
|
||||
return fmt.Sprintf("TrilinearInterpolate{ inp:%v outp:%v grid:%v values[:9]:%v }", c.d.num_inputs, c.d.num_outputs, c.d.grid_points, c.d.samples[:min(9, len(c.d.samples))])
|
||||
}
|
||||
|
||||
var _ CLUT = (*TrilinearInterpolate)(nil)
|
||||
var _ CLUT = (*TetrahedralInterpolate)(nil)
|
||||
|
||||
func decode_clut_table8(raw []byte, ans []unit_float) {
|
||||
for i, x := range raw {
|
||||
ans[i] = unit_float(x) / math.MaxUint8
|
||||
}
|
||||
}
|
||||
|
||||
func decode_clut_table16(raw []byte, ans []unit_float) {
|
||||
raw = raw[:2*len(ans)]
|
||||
const inv = 1. / math.MaxUint16
|
||||
for i := range ans {
|
||||
val := binary.BigEndian.Uint16(raw)
|
||||
ans[i] = unit_float(val) * inv
|
||||
raw = raw[2:]
|
||||
}
|
||||
}
|
||||
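As an aside, the 16-bit path above just reads big-endian words and normalizes by 65535. A self-contained sketch of that step, using made-up sample bytes rather than data from a real profile:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// Two 16-bit CLUT samples stored big-endian: 0x0000 and 0xFFFF.
	raw := []byte{0x00, 0x00, 0xFF, 0xFF}
	out := make([]float64, 2)
	for i := range out {
		v := binary.BigEndian.Uint16(raw[2*i:])
		out[i] = float64(v) / math.MaxUint16 // normalize to [0, 1]
	}
	fmt.Println(out) // [0 1]
}
```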
|
||||
func decode_clut_table(raw []byte, bytes_per_channel, OutputChannels int, grid_points []int, output_colorspace ColorSpace) (ans []unit_float, consumed int, err error) {
|
||||
expected_num_of_output_channels := 3
|
||||
switch output_colorspace {
|
||||
case ColorSpaceCMYK:
|
||||
expected_num_of_output_channels = 4
|
||||
}
|
||||
if expected_num_of_output_channels != OutputChannels {
|
||||
return nil, 0, fmt.Errorf("CLUT table number of output channels %d inappropriate for output_colorspace: %s", OutputChannels, output_colorspace)
|
||||
}
|
||||
expected_num_of_values := expectedValues(grid_points, OutputChannels)
|
||||
consumed = bytes_per_channel * expected_num_of_values
|
||||
if len(raw) < consumed {
|
||||
return nil, 0, fmt.Errorf("CLUT table too short %d < %d", len(raw), bytes_per_channel*expected_num_of_values)
|
||||
}
|
||||
ans = make([]unit_float, expected_num_of_values)
|
||||
if bytes_per_channel == 1 {
|
||||
decode_clut_table8(raw[:consumed], ans)
|
||||
} else {
|
||||
decode_clut_table16(raw[:consumed], ans)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func make_clut(grid_points []int, num_inputs, num_outputs int, samples []unit_float, legacy, prefer_trilinear bool) CLUT {
|
||||
if num_inputs >= 3 && !prefer_trilinear {
|
||||
return &TetrahedralInterpolate{make_interpolation_data(num_inputs, num_outputs, grid_points, samples), legacy}
|
||||
}
|
||||
return &TrilinearInterpolate{make_interpolation_data(num_inputs, num_outputs, grid_points, samples), legacy}
|
||||
}
|
||||
|
||||
// section 10.12.3 (CLUT) in ICC.1-2022-05.pdf
|
||||
func embeddedClutDecoder(raw []byte, InputChannels, OutputChannels int) (any, error) {
|
||||
func embeddedClutDecoder(raw []byte, InputChannels, OutputChannels int, output_colorspace ColorSpace, prefer_trilinear bool) (any, error) {
|
||||
if len(raw) < 20 {
|
||||
return nil, errors.New("clut tag too short")
|
||||
}
|
||||
gridPoints := make([]uint8, InputChannels)
|
||||
copy(gridPoints, raw[:InputChannels])
|
||||
if InputChannels > 4 {
|
||||
return nil, fmt.Errorf("clut supports at most 4 input channels not: %d", InputChannels)
|
||||
}
|
||||
gridPoints := make([]int, InputChannels)
|
||||
for i, b := range raw[:InputChannels] {
|
||||
gridPoints[i] = int(b)
|
||||
}
|
||||
for i, nPoints := range gridPoints {
|
||||
if nPoints < 2 {
|
||||
return nil, fmt.Errorf("CLUT input channel %d has invalid grid points: %d", i, nPoints)
|
||||
}
|
||||
}
|
||||
bytes_per_channel := raw[16]
|
||||
raw = raw[20:]
|
||||
// expected size: (product of grid points) * output channels * bytes_per_channel
|
||||
expected_num_of_values := expectedValues(gridPoints, OutputChannels)
|
||||
values := make([]float64, expected_num_of_values)
|
||||
if len(values)*int(bytes_per_channel) > len(raw) {
|
||||
return nil, fmt.Errorf("CLUT unexpected body length: expected %d, got %d", expected_num_of_values*int(bytes_per_channel), len(raw))
|
||||
values, _, err := decode_clut_table(raw, int(bytes_per_channel), OutputChannels, gridPoints, output_colorspace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch bytes_per_channel {
|
||||
case 1:
|
||||
for i, b := range raw[:len(values)] {
|
||||
values[i] = float64(b) / 255
|
||||
}
|
||||
case 2:
|
||||
for i := range len(values) {
|
||||
values[i] = float64(binary.BigEndian.Uint16(raw[i*2:i*2+2])) / 65535
|
||||
}
|
||||
}
|
||||
ans := &CLUTTag{
|
||||
GridPoints: gridPoints,
|
||||
InputChannels: InputChannels,
|
||||
OutputChannels: OutputChannels,
|
||||
Values: values,
|
||||
}
|
||||
if ans.InputChannels > 6 {
|
||||
return nil, fmt.Errorf("unsupported num of CLUT input channels: %d", ans.InputChannels)
|
||||
}
|
||||
return ans, nil
|
||||
return make_clut(gridPoints, InputChannels, OutputChannels, values, false, prefer_trilinear), nil
|
||||
}
|
||||
|
||||
func expectedValues(gridPoints []uint8, outputChannels int) int {
|
||||
func expectedValues(gridPoints []int, outputChannels int) int {
|
||||
expectedPoints := 1
|
||||
for _, g := range gridPoints {
|
||||
expectedPoints *= int(g)
|
||||
@@ -62,79 +117,39 @@ func expectedValues(gridPoints []uint8, outputChannels int) int {
|
||||
return expectedPoints * outputChannels
|
||||
}
|
||||
|
||||
func (c *CLUTTag) WorkspaceSize() int { return 16 }
|
||||
func (c *TrilinearInterpolate) IOSig() (int, int) { return c.d.num_inputs, c.d.num_outputs }
|
||||
func (c *TetrahedralInterpolate) IOSig() (int, int) { return c.d.num_inputs, c.d.num_outputs }
|
||||
func (c *TrilinearInterpolate) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *TetrahedralInterpolate) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
|
||||
func (c *CLUTTag) IsSuitableFor(num_input_channels, num_output_channels int) bool {
|
||||
return num_input_channels == int(c.InputChannels) && num_output_channels == c.OutputChannels
|
||||
func (c *TrilinearInterpolate) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
var obuf [3]unit_float
|
||||
var ibuf = [3]unit_float{r, g, b}
|
||||
c.d.trilinear_interpolate(ibuf[:], obuf[:])
|
||||
return obuf[0], obuf[1], obuf[2]
|
||||
}
|
||||
func (m *TrilinearInterpolate) TransformGeneral(o, i []unit_float) {
|
||||
o = o[0:m.d.num_outputs:m.d.num_outputs]
|
||||
for i := range o {
|
||||
o[i] = 0
|
||||
}
|
||||
m.d.trilinear_interpolate(i[0:m.d.num_inputs:m.d.num_inputs], o)
|
||||
}
|
||||
|
||||
func (c *CLUTTag) Transform(output, workspace []float64, inputs ...float64) error {
|
||||
return c.Lookup(output, workspace, inputs)
|
||||
func (c *TetrahedralInterpolate) Tetrahedral_interpolate(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
var obuf [3]unit_float
|
||||
c.d.tetrahedral_interpolation(r, g, b, obuf[:])
|
||||
return obuf[0], obuf[1], obuf[2]
|
||||
}
|
||||
|
||||
func (c *CLUTTag) Lookup(output, workspace, inputs []float64) error {
|
||||
// clamp input values to 0-1...
|
||||
clamped := workspace[:len(inputs)]
|
||||
for i, v := range inputs {
|
||||
clamped[i] = clamp01(v)
|
||||
}
|
||||
// find the grid positions and interpolation factors...
|
||||
gridFrac := workspace[len(clamped) : 2*len(clamped)]
|
||||
var buf [4]int
|
||||
gridPos := buf[:]
|
||||
for i, v := range clamped {
|
||||
nPoints := int(c.GridPoints[i])
|
||||
if nPoints < 2 {
|
||||
return fmt.Errorf("CLUT input channel %d has invalid grid points: %d", i, nPoints)
|
||||
}
|
||||
pos := v * float64(nPoints-1)
|
||||
gridPos[i] = int(pos)
|
||||
if gridPos[i] >= nPoints-1 {
|
||||
gridPos[i] = nPoints - 2 // clamp
|
||||
gridFrac[i] = 1.0
|
||||
} else {
|
||||
gridFrac[i] = pos - float64(gridPos[i])
|
||||
}
|
||||
}
|
||||
// perform multi-dimensional interpolation (recursive)...
|
||||
return c.triLinearInterpolate(output[:c.OutputChannels], gridPos, gridFrac)
|
||||
func (c *TetrahedralInterpolate) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.Tetrahedral_interpolate(r, g, b)
|
||||
}
|
||||
|
||||
func (c *CLUTTag) triLinearInterpolate(out []float64, gridPos []int, gridFrac []float64) error {
|
||||
numCorners := 1 << c.InputChannels // 2^inputs
|
||||
for o := range c.OutputChannels {
|
||||
out[o] = 0
|
||||
}
|
||||
// walk all corners of the hypercube
|
||||
for corner := range numCorners {
|
||||
weight := 1.0
|
||||
idx := 0
|
||||
stride := 1
|
||||
for dim := c.InputChannels - 1; dim >= 0; dim-- {
|
||||
bit := (corner >> dim) & 1
|
||||
pos := gridPos[dim] + bit
|
||||
if pos >= int(c.GridPoints[dim]) {
|
||||
return fmt.Errorf("CLUT corner position out of bounds at dimension %d", dim)
|
||||
}
|
||||
idx += pos * stride
|
||||
stride *= int(c.GridPoints[dim])
|
||||
if bit == 0 {
|
||||
weight *= 1 - gridFrac[dim]
|
||||
} else {
|
||||
weight *= gridFrac[dim]
|
||||
}
|
||||
}
|
||||
base := idx * c.OutputChannels
|
||||
if base+c.OutputChannels > len(c.Values) {
|
||||
return errors.New("CLUT value index out of bounds")
|
||||
}
|
||||
for o := range c.OutputChannels {
|
||||
out[o] += weight * c.Values[base+o]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
func (m *TetrahedralInterpolate) TransformGeneral(o, i []unit_float) {
|
||||
m.d.tetrahedral_interpolation4(i[0], i[1], i[2], i[3], o[:m.d.num_outputs:m.d.num_outputs])
|
||||
}
|
||||
|
||||
func clamp01(v float64) float64 {
|
||||
func clamp01(v unit_float) unit_float {
|
||||
return max(0, min(v, 1))
|
||||
}
|
||||
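The interpolation internals referenced by TrilinearInterpolate are not part of this diff. The following standalone sketch shows the trilinear corner-weighting idea on a hypothetical 2x2x2 identity LUT; all names and data here are illustrative, not the library's implementation.

```go
package main

import "fmt"

// trilinear looks up (r, g, b) in a 3D LUT with gp grid points per axis and three
// output channels by weighting the 8 surrounding corners.
func trilinear(lut []float64, gp int, r, g, b float64) (float64, float64, float64) {
	pos := func(v float64) (int, float64) {
		p := v * float64(gp-1)
		i := int(p)
		if i >= gp-1 {
			return gp - 2, 1
		}
		return i, p - float64(i)
	}
	weight := func(bit int, f float64) float64 {
		if bit == 0 {
			return 1 - f
		}
		return f
	}
	ri, rf := pos(r)
	gi, gf := pos(g)
	bi, bf := pos(b)
	var out [3]float64
	for corner := 0; corner < 8; corner++ {
		cr, cg, cb := corner>>2&1, corner>>1&1, corner&1
		w := weight(cr, rf) * weight(cg, gf) * weight(cb, bf)
		idx := ((ri+cr)*gp*gp + (gi+cg)*gp + (bi+cb)) * 3
		for o := 0; o < 3; o++ {
			out[o] += w * lut[idx+o]
		}
	}
	return out[0], out[1], out[2]
}

func main() {
	// A 2x2x2 identity LUT: each grid point stores its own coordinates.
	const gp = 2
	lut := make([]float64, gp*gp*gp*3)
	for r := 0; r < gp; r++ {
		for g := 0; g < gp; g++ {
			for b := 0; b < gp; b++ {
				i := (r*gp*gp + g*gp + b) * 3
				lut[i], lut[i+1], lut[i+2] = float64(r), float64(g), float64(b)
			}
		}
	}
	fmt.Println(trilinear(lut, gp, 0.25, 0.5, 0.75)) // 0.25 0.5 0.75
}
```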
|
||||
668
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/tags_curve.go
generated
vendored
Normal file
@@ -0,0 +1,668 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type IdentityCurve int
|
||||
type GammaCurve struct {
|
||||
gamma, inv_gamma unit_float
|
||||
is_one bool
|
||||
}
|
||||
type PointsCurve struct {
|
||||
points, reverse_lookup []unit_float
|
||||
max_idx, reverse_max_idx unit_float
|
||||
}
|
||||
type ConditionalZeroCurve struct{ g, a, b, threshold, inv_gamma, inv_a unit_float }
|
||||
type ConditionalCCurve struct{ g, a, b, c, threshold, inv_gamma, inv_a unit_float }
|
||||
type SplitCurve struct{ g, a, b, c, d, inv_g, inv_a, inv_c, threshold unit_float }
|
||||
type ComplexCurve struct{ g, a, b, c, d, e, f, inv_g, inv_a, inv_c, threshold unit_float }
|
||||
type Curve1D interface {
|
||||
Transform(x unit_float) unit_float
|
||||
InverseTransform(x unit_float) unit_float
|
||||
Prepare() error
|
||||
String() string
|
||||
}
|
||||
|
||||
var _ Curve1D = (*IdentityCurve)(nil)
|
||||
var _ Curve1D = (*GammaCurve)(nil)
|
||||
var _ Curve1D = (*PointsCurve)(nil)
|
||||
var _ Curve1D = (*ConditionalZeroCurve)(nil)
|
||||
var _ Curve1D = (*ConditionalCCurve)(nil)
|
||||
var _ Curve1D = (*SplitCurve)(nil)
|
||||
var _ Curve1D = (*ComplexCurve)(nil)
|
||||
|
||||
type CurveTransformer struct {
|
||||
curves []Curve1D
|
||||
name string
|
||||
}
|
||||
type InverseCurveTransformer struct {
|
||||
curves []Curve1D
|
||||
name string
|
||||
}
|
||||
|
||||
func (c CurveTransformer) IOSig() (int, int) {
|
||||
return len(c.curves), len(c.curves)
|
||||
}
|
||||
|
||||
func (c CurveTransformer) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.curves[0].Transform(r), c.curves[1].Transform(g), c.curves[2].Transform(b)
|
||||
}
|
||||
func (c CurveTransformer) TransformGeneral(o, i []unit_float) {
|
||||
for n, c := range c.curves {
|
||||
o[n] = c.Transform(i[n])
|
||||
}
|
||||
}
|
||||
|
||||
func (c InverseCurveTransformer) IOSig() (int, int) {
|
||||
return len(c.curves), len(c.curves)
|
||||
}
|
||||
func (c InverseCurveTransformer) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
// we need to clamp as per spec section F.3 of ICC.1-2022-05.pdf
|
||||
return c.curves[0].InverseTransform(clamp01(r)), c.curves[1].InverseTransform(clamp01(g)), c.curves[2].InverseTransform(clamp01(b))
|
||||
}
|
||||
func (c InverseCurveTransformer) TransformGeneral(o, i []unit_float) {
|
||||
for n, c := range c.curves {
|
||||
o[n] = c.InverseTransform(i[n])
|
||||
}
|
||||
}
|
||||
|
||||
type CurveTransformer3 struct {
|
||||
r, g, b Curve1D
|
||||
name string
|
||||
}
|
||||
|
||||
func (c CurveTransformer3) IOSig() (int, int) { return 3, 3 }
|
||||
func (c CurveTransformer3) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return c.r.Transform(r), c.g.Transform(g), c.b.Transform(b)
|
||||
}
|
||||
func (m CurveTransformer3) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
type InverseCurveTransformer3 struct {
|
||||
r, g, b Curve1D
|
||||
name string
|
||||
}
|
||||
|
||||
func (c *CurveTransformer) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *CurveTransformer3) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *InverseCurveTransformer) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *InverseCurveTransformer3) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *CurveTransformer) Curves() []Curve1D { return c.curves }
|
||||
func (c *InverseCurveTransformer) Curves() []Curve1D { return c.curves }
|
||||
func (c *CurveTransformer3) Curves() []Curve1D { return []Curve1D{c.r, c.g, c.b} }
|
||||
func (c *InverseCurveTransformer3) Curves() []Curve1D { return []Curve1D{c.r, c.g, c.b} }
|
||||
|
||||
func curve_string(name string, is_inverse bool, curves ...Curve1D) string {
|
||||
var b strings.Builder
|
||||
if is_inverse {
|
||||
name += "Inverted"
|
||||
}
|
||||
b.WriteString(name + "{")
|
||||
for i, c := range curves {
|
||||
b.WriteString(fmt.Sprintf("[%d]%s ", i, c.String()))
|
||||
}
|
||||
b.WriteString("}")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (c CurveTransformer3) String() string { return curve_string(c.name, false, c.r, c.g, c.b) }
|
||||
func (c CurveTransformer) String() string { return curve_string(c.name, false, c.curves...) }
|
||||
func (c InverseCurveTransformer3) String() string { return curve_string(c.name, true, c.r, c.g, c.b) }
|
||||
func (c InverseCurveTransformer) String() string { return curve_string(c.name, true, c.curves...) }
|
||||
|
||||
func (c InverseCurveTransformer3) IOSig() (int, int) { return 3, 3 }
|
||||
func (c InverseCurveTransformer3) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
// we need to clamp as per spec section F.3 of ICC.1-2202-05.pdf
|
||||
return c.r.InverseTransform(clamp01(r)), c.g.InverseTransform(clamp01(g)), c.b.InverseTransform(clamp01(b))
|
||||
}
|
||||
func (m InverseCurveTransformer3) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
type Curves interface {
|
||||
ChannelTransformer
|
||||
Curves() []Curve1D
|
||||
}
|
||||
|
||||
func NewCurveTransformer(name string, curves ...Curve1D) Curves {
|
||||
all_identity := true
|
||||
for _, c := range curves {
|
||||
if c == nil {
|
||||
ident := IdentityCurve(0)
|
||||
c = &ident
|
||||
}
|
||||
if _, is_ident := c.(*IdentityCurve); !is_ident {
|
||||
all_identity = false
|
||||
}
|
||||
}
|
||||
if all_identity {
|
||||
return nil
|
||||
}
|
||||
switch len(curves) {
|
||||
case 3:
|
||||
return &CurveTransformer3{curves[0], curves[1], curves[2], name}
|
||||
default:
|
||||
return &CurveTransformer{curves, name}
|
||||
}
|
||||
}
|
||||
func NewInverseCurveTransformer(name string, curves ...Curve1D) Curves {
|
||||
all_identity := true
|
||||
for _, c := range curves {
|
||||
if c == nil {
|
||||
ident := IdentityCurve(0)
|
||||
c = &ident
|
||||
}
|
||||
if _, is_ident := c.(*IdentityCurve); !is_ident {
|
||||
all_identity = false
|
||||
}
|
||||
}
|
||||
if all_identity {
|
||||
return nil
|
||||
}
|
||||
switch len(curves) {
|
||||
case 3:
|
||||
return &InverseCurveTransformer3{curves[0], curves[1], curves[2], name}
|
||||
default:
|
||||
return &InverseCurveTransformer{curves, name}
|
||||
}
|
||||
}
|
||||
|
||||
type ParametricCurveFunction uint16
|
||||
|
||||
const (
|
||||
SimpleGammaFunction ParametricCurveFunction = 0 // Y = X^g
|
||||
ConditionalZeroFunction ParametricCurveFunction = 1 // Y = (aX+b)^g for X >= d, else 0
|
||||
ConditionalCFunction ParametricCurveFunction = 2 // Y = (aX+b)^g for X >= d, else c
|
||||
SplitFunction ParametricCurveFunction = 3 // Two different functions split at d
|
||||
ComplexFunction ParametricCurveFunction = 4 // More complex piecewise function
|
||||
)
|
||||
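A worked example of the type-3 (split) function may help: sRGB is the familiar instance of this curve. The snippet below is an illustrative standalone sketch using math.Pow, not the library's pow helper or SplitCurve type.

```go
package main

import (
	"fmt"
	"math"
)

// splitCurve evaluates the ICC type-3 parametric function:
// Y = (a*X + b)^g for X >= d, and Y = c*X otherwise.
func splitCurve(x, g, a, b, c, d float64) float64 {
	if x >= d {
		return math.Pow(a*x+b, g)
	}
	return c * x
}

func main() {
	// The well-known sRGB constants for the type-3 curve.
	g, a, b := 2.4, 1/1.055, 0.055/1.055
	c, d := 1/12.92, 0.0031308*12.92
	fmt.Printf("%.5f\n", splitCurve(0.5, g, a, b, c, d))  // power segment: ≈ 0.21404
	fmt.Printf("%.5f\n", splitCurve(0.02, g, a, b, c, d)) // linear segment: ≈ 0.00155
}
```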
|
||||
func align_to_4(x int) int {
|
||||
if extra := x % 4; extra > 0 {
|
||||
x += 4 - extra
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func fixed88ToFloat(raw []byte) unit_float {
|
||||
return unit_float(uint16(raw[0])<<8|uint16(raw[1])) / 256
|
||||
}
|
||||
|
||||
func samples_to_analytic(points []unit_float) Curve1D {
|
||||
threshold := 1e-3
|
||||
switch {
|
||||
case len(points) < 2:
|
||||
return nil
|
||||
case len(points) > 400:
|
||||
threshold = FLOAT_EQUALITY_THRESHOLD
|
||||
case len(points) > 100:
|
||||
threshold = 2 * FLOAT_EQUALITY_THRESHOLD
|
||||
case len(points) > 40:
|
||||
threshold = 16 * FLOAT_EQUALITY_THRESHOLD
|
||||
}
|
||||
if len(points) < 2 {
|
||||
return nil
|
||||
}
|
||||
n := 1 / unit_float(len(points)-1)
|
||||
srgb := SRGBCurve().Transform
|
||||
is_srgb, is_identity := true, true
|
||||
for i, y := range points {
|
||||
x := unit_float(i) * n
|
||||
if is_srgb {
|
||||
is_srgb = math.Abs(float64(y-srgb(x))) <= threshold
|
||||
}
|
||||
if is_identity {
|
||||
is_identity = math.Abs(float64(y-x)) <= threshold
|
||||
}
|
||||
if !is_identity && !is_srgb {
|
||||
break
|
||||
}
|
||||
}
|
||||
if is_identity {
|
||||
ans := IdentityCurve(0)
|
||||
return &ans
|
||||
}
|
||||
if is_srgb {
|
||||
return SRGBCurve()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func load_points_curve(fp []unit_float) (Curve1D, error) {
|
||||
analytic := samples_to_analytic(fp)
|
||||
if analytic != nil {
|
||||
return analytic, nil
|
||||
}
|
||||
c := &PointsCurve{points: fp}
|
||||
if err := c.Prepare(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func embeddedCurveDecoder(raw []byte) (any, int, error) {
|
||||
if len(raw) < 12 {
|
||||
return nil, 0, errors.New("curv tag too short")
|
||||
}
|
||||
count := int(binary.BigEndian.Uint32(raw[8:12]))
|
||||
consumed := align_to_4(12 + count*2)
|
||||
switch count {
|
||||
case 0:
|
||||
c := IdentityCurve(0)
|
||||
return &c, consumed, nil
|
||||
case 1:
|
||||
if len(raw) < 14 {
|
||||
return nil, 0, errors.New("curv tag missing gamma value")
|
||||
}
|
||||
g := &GammaCurve{gamma: fixed88ToFloat(raw[12:14])}
|
||||
if err := g.Prepare(); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var c Curve1D = g
|
||||
if g.is_one {
|
||||
ic := IdentityCurve(0)
|
||||
c = &ic
|
||||
}
|
||||
return c, consumed, nil
|
||||
default:
|
||||
points := make([]uint16, count)
|
||||
_, err := binary.Decode(raw[12:], binary.BigEndian, points)
|
||||
if err != nil {
|
||||
return nil, 0, errors.New("curv tag truncated")
|
||||
}
|
||||
fp := make([]unit_float, len(points))
|
||||
for i, p := range points {
|
||||
fp[i] = unit_float(p) / math.MaxUint16
|
||||
}
|
||||
c, err := load_points_curve(fp)
|
||||
return c, consumed, err
|
||||
}
|
||||
}
|
||||
|
||||
func curveDecoder(raw []byte) (any, error) {
|
||||
ans, _, err := embeddedCurveDecoder(raw)
|
||||
return ans, err
|
||||
}
|
||||
|
||||
func readS15Fixed16BE(raw []byte) unit_float {
|
||||
msb := int16(raw[0])<<8 | int16(raw[1])
|
||||
lsb := uint16(raw[2])<<8 | uint16(raw[3])
|
||||
return unit_float(msb) + unit_float(lsb)/(1<<16)
|
||||
}
|
||||
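s15Fixed16Number is the ICC fixed-point format: a signed 16-bit integer part followed by an unsigned 16-bit fraction, stored big-endian. A standalone sketch of the same decoding with a few sample encodings (the byte values are illustrative):

```go
package main

import "fmt"

// readS15Fixed16BE decodes an ICC s15Fixed16Number: signed 16-bit integer part,
// then an unsigned 16-bit fractional part, both big-endian.
func readS15Fixed16BE(raw []byte) float64 {
	msb := int16(raw[0])<<8 | int16(raw[1])
	lsb := uint16(raw[2])<<8 | uint16(raw[3])
	return float64(msb) + float64(lsb)/(1<<16)
}

func main() {
	fmt.Println(readS15Fixed16BE([]byte{0x00, 0x01, 0x00, 0x00})) // 1
	fmt.Println(readS15Fixed16BE([]byte{0x00, 0x00, 0x80, 0x00})) // 0.5
	fmt.Println(readS15Fixed16BE([]byte{0xFF, 0xFF, 0x00, 0x00})) // -1
}
```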
|
||||
func embeddedParametricCurveDecoder(raw []byte) (ans any, consumed int, err error) {
|
||||
block_len := len(raw)
|
||||
if block_len < 16 {
|
||||
return nil, 0, errors.New("para tag too short")
|
||||
}
|
||||
funcType := ParametricCurveFunction(binary.BigEndian.Uint16(raw[8:10]))
|
||||
const header_len = 12
|
||||
raw = raw[header_len:]
|
||||
p := func() unit_float {
|
||||
ans := readS15Fixed16BE(raw[:4])
|
||||
raw = raw[4:]
|
||||
return ans
|
||||
}
|
||||
defer func() { consumed = align_to_4(consumed) }()
|
||||
var c Curve1D
|
||||
|
||||
switch funcType {
|
||||
case SimpleGammaFunction:
|
||||
if consumed = header_len + 4; block_len < consumed {
|
||||
return nil, 0, errors.New("para tag too short")
|
||||
}
|
||||
g := &GammaCurve{gamma: p()}
|
||||
if abs(g.gamma-1) < FLOAT_EQUALITY_THRESHOLD {
|
||||
ic := IdentityCurve(0)
|
||||
c = &ic
|
||||
} else {
|
||||
c = g
|
||||
}
|
||||
case ConditionalZeroFunction:
|
||||
if consumed = header_len + 3*4; block_len < consumed {
|
||||
return nil, 0, errors.New("para tag too short")
|
||||
}
|
||||
c = &ConditionalZeroCurve{g: p(), a: p(), b: p()}
|
||||
case ConditionalCFunction:
|
||||
if consumed = header_len + 4*4; block_len < consumed {
|
||||
return nil, 0, errors.New("para tag too short")
|
||||
}
|
||||
c = &ConditionalCCurve{g: p(), a: p(), b: p(), c: p()}
|
||||
case SplitFunction:
|
||||
if consumed = header_len + 5*4; block_len < consumed {
|
||||
return nil, 0, errors.New("para tag too short")
|
||||
}
|
||||
c = &SplitCurve{g: p(), a: p(), b: p(), c: p(), d: p()}
|
||||
case ComplexFunction:
|
||||
if consumed = header_len + 7*4; block_len < consumed {
|
||||
return nil, 0, errors.New("para tag too short")
|
||||
}
|
||||
c = &ComplexCurve{g: p(), a: p(), b: p(), c: p(), d: p(), e: p(), f: p()}
|
||||
default:
|
||||
return nil, 0, fmt.Errorf("unknown parametric function type: %d", funcType)
|
||||
}
|
||||
if err = c.Prepare(); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return c, consumed, nil
|
||||
|
||||
}
|
||||
|
||||
func parametricCurveDecoder(raw []byte) (any, error) {
|
||||
ans, _, err := embeddedParametricCurveDecoder(raw)
|
||||
return ans, err
|
||||
}
|
||||
|
||||
func (c IdentityCurve) Transform(x unit_float) unit_float {
|
||||
return x
|
||||
}
|
||||
|
||||
func (c IdentityCurve) InverseTransform(x unit_float) unit_float {
|
||||
return x
|
||||
}
|
||||
|
||||
func (c IdentityCurve) Prepare() error { return nil }
|
||||
func (c IdentityCurve) String() string { return "IdentityCurve" }
|
||||
|
||||
func (c GammaCurve) Transform(x unit_float) unit_float {
|
||||
if x < 0 {
|
||||
if c.is_one {
|
||||
return x
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return pow(x, c.gamma)
|
||||
}
|
||||
|
||||
func (c GammaCurve) InverseTransform(x unit_float) unit_float {
|
||||
if x < 0 {
|
||||
if c.is_one {
|
||||
return x
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return pow(x, c.inv_gamma)
|
||||
}
|
||||
|
||||
func (c *GammaCurve) Prepare() error {
|
||||
if c.gamma == 0 {
|
||||
return fmt.Errorf("gamma curve has zero gamma value")
|
||||
}
|
||||
c.inv_gamma = 1 / c.gamma
|
||||
c.is_one = abs(c.gamma-1) < FLOAT_EQUALITY_THRESHOLD
|
||||
return nil
|
||||
}
|
||||
func (c GammaCurve) String() string { return fmt.Sprintf("GammaCurve{%f}", c.gamma) }
|
||||
|
||||
func calculate_reverse_for_well_behaved_sampled_curve(points []unit_float) []unit_float {
|
||||
n := len(points) - 1
|
||||
if n < 1 {
|
||||
return nil
|
||||
}
|
||||
var prev, maxy unit_float
|
||||
var miny unit_float = math.MaxFloat32
|
||||
for _, y := range points {
|
||||
if y < prev || y < 0 || y > 1 {
|
||||
return nil // not monotonic or range not in [0, 1]
|
||||
}
|
||||
prev = y
|
||||
miny = min(y, miny)
|
||||
maxy = max(y, maxy)
|
||||
}
|
||||
y_to_x := make([]unit_float, n+1)
|
||||
points_y_idx := 0
|
||||
n_inv := 1.0 / unit_float(n)
|
||||
for i := range y_to_x {
|
||||
if points_y_idx > n {
|
||||
// we are between maxy and 1
|
||||
y_to_x[i] = 1
|
||||
continue
|
||||
}
|
||||
if int(points[points_y_idx]*unit_float(n)) == i {
|
||||
y_to_x[i] = unit_float(points_y_idx) * n_inv
|
||||
for {
|
||||
points_y_idx++
|
||||
if points_y_idx > n || int(points[points_y_idx]*unit_float(n)) != i {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if points_y_idx == 0 {
|
||||
// we are between 0 and miny
|
||||
y_to_x[i] = 0
|
||||
continue
|
||||
}
|
||||
// we are between points_y_idx-1 and points_y_idx
|
||||
y1, y2 := points[points_y_idx-1], points[points_y_idx]
|
||||
if y1 == 1 {
|
||||
y_to_x[i] = 1
|
||||
continue
|
||||
}
|
||||
for y1 == y2 {
|
||||
points_y_idx++
|
||||
y2 = 1
|
||||
if points_y_idx <= n {
|
||||
y2 = points[points_y_idx]
|
||||
}
|
||||
}
|
||||
y := unit_float(i) * n_inv
|
||||
frac := (y - y1) / (y2 - y1)
|
||||
x1 := unit_float(points_y_idx-1) * n_inv
|
||||
// x = x1 + frac * (x2 - x1)
|
||||
y_to_x[i] = x1 + frac*n_inv
|
||||
}
|
||||
}
|
||||
return y_to_x
|
||||
}
|
||||
|
||||
func (c *PointsCurve) Prepare() error {
|
||||
c.max_idx = unit_float(len(c.points) - 1)
|
||||
reverse_lookup := calculate_reverse_for_well_behaved_sampled_curve(c.points)
|
||||
if reverse_lookup == nil {
|
||||
reverse_lookup = make([]unit_float, len(c.points))
|
||||
for i := range len(reverse_lookup) {
|
||||
y := unit_float(i) / unit_float(len(reverse_lookup)-1)
|
||||
idx := get_interval(c.points, y)
|
||||
if idx < 0 {
|
||||
reverse_lookup[i] = 0
|
||||
} else {
|
||||
y1, y2 := c.points[idx], c.points[idx+1]
|
||||
if y2 < y1 {
|
||||
y1, y2 = y2, y1
|
||||
}
|
||||
x1, x2 := unit_float(idx)/c.max_idx, unit_float(idx+1)/c.max_idx
|
||||
frac := (y - y1) / (y2 - y1)
|
||||
reverse_lookup[i] = x1 + frac*(x2-x1)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.reverse_lookup = reverse_lookup
|
||||
c.reverse_max_idx = unit_float(len(reverse_lookup) - 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c PointsCurve) Transform(v unit_float) unit_float {
|
||||
return sampled_value(c.points, c.max_idx, v)
|
||||
}
|
||||
|
||||
func (c PointsCurve) InverseTransform(v unit_float) unit_float {
|
||||
return sampled_value(c.reverse_lookup, c.reverse_max_idx, v)
|
||||
}
|
||||
func (c PointsCurve) String() string { return fmt.Sprintf("PointsCurve{%d}", len(c.points)) }
|
||||
|
||||
func get_interval(lookup []unit_float, y unit_float) int {
|
||||
if len(lookup) < 2 {
|
||||
return -1
|
||||
}
|
||||
for i := range len(lookup) - 1 {
|
||||
y0, y1 := lookup[i], lookup[i+1]
|
||||
if y1 < y0 {
|
||||
y0, y1 = y1, y0
|
||||
}
|
||||
if y0 <= y && y <= y1 {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
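The reverse lookup built in Prepare amounts to inverting a monotone sampled curve by interval search plus linear interpolation, the same idea get_interval supports. A standalone sketch under that assumption; the gamma-2.2 table is illustrative only:

```go
package main

import (
	"fmt"
	"math"
)

// invertSampled finds x such that curve(x) ≈ y for a monotonically increasing
// table of samples over [0, 1].
func invertSampled(samples []float64, y float64) float64 {
	n := len(samples) - 1
	for i := 0; i < n; i++ {
		y0, y1 := samples[i], samples[i+1]
		if y0 <= y && y <= y1 {
			x0, x1 := float64(i)/float64(n), float64(i+1)/float64(n)
			if y1 == y0 {
				return x0
			}
			return x0 + (y-y0)/(y1-y0)*(x1-x0)
		}
	}
	return 1
}

func main() {
	// Sample a gamma-2.2 curve at 33 points and invert it at y = 0.5.
	samples := make([]float64, 33)
	for i := range samples {
		samples[i] = math.Pow(float64(i)/32, 2.2)
	}
	x := invertSampled(samples, 0.5)
	fmt.Printf("x ≈ %.4f (exact inverse is %.4f)\n", x, math.Pow(0.5, 1/2.2)) // both ≈ 0.73
}
```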
|
||||
func safe_inverse(x, fallback unit_float) unit_float {
|
||||
if x == 0 {
|
||||
return fallback
|
||||
}
|
||||
return 1 / x
|
||||
}
|
||||
|
||||
func (c *ConditionalZeroCurve) Prepare() error {
|
||||
c.inv_a = safe_inverse(c.a, 1)
|
||||
c.threshold, c.inv_gamma = -c.b*c.inv_a, safe_inverse(c.g, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConditionalZeroCurve) String() string {
|
||||
return fmt.Sprintf("ConditionalZeroCurve{a: %v b: %v g: %v}", c.a, c.b, c.g)
|
||||
}
|
||||
|
||||
func (c *ConditionalZeroCurve) Transform(x unit_float) unit_float {
|
||||
// Y = (aX+b)^g if X ≥ -b/a else 0
|
||||
if x >= c.threshold {
|
||||
if e := c.a*x + c.b; e > 0 {
|
||||
return pow(e, c.g)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *ConditionalZeroCurve) InverseTransform(y unit_float) unit_float {
|
||||
// X = (Y^(1/g) - b) / a if Y >= 0 else X = -b/a
|
||||
// the below doesn't match the actual spec but matches the lcms2 implementation
|
||||
return max(0, (pow(y, c.inv_gamma)-c.b)*c.inv_a)
|
||||
}
|
||||
|
||||
func (c *ConditionalCCurve) Prepare() error {
|
||||
c.inv_a = safe_inverse(c.a, 1)
|
||||
c.threshold, c.inv_gamma = -c.b*c.inv_a, safe_inverse(c.g, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConditionalCCurve) String() string {
|
||||
return fmt.Sprintf("ConditionalCCurve{a: %v b: %v c: %v g: %v}", c.a, c.b, c.c, c.g)
|
||||
}
|
||||
|
||||
func (c *ConditionalCCurve) Transform(x unit_float) unit_float {
|
||||
// Y = (aX+b)^g + c if X ≥ -b/a else c
|
||||
if x >= c.threshold {
|
||||
if e := c.a*x + c.b; e > 0 {
|
||||
return pow(e, c.g) + c.c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return c.c
|
||||
}
|
||||
|
||||
func (c *ConditionalCCurve) InverseTransform(y unit_float) unit_float {
|
||||
// X = ((Y-c)^(1/g) - b) / a if Y >= c else X = -b/a
|
||||
if e := y - c.c; e >= 0 {
|
||||
if e == 0 {
|
||||
return 0
|
||||
}
|
||||
return (pow(e, c.inv_gamma) - c.b) * c.inv_a
|
||||
}
|
||||
return c.threshold
|
||||
}
|
||||
|
||||
func (c *SplitCurve) Prepare() error {
|
||||
c.threshold, c.inv_g, c.inv_a, c.inv_c = pow(c.a*c.d+c.b, c.g), safe_inverse(c.g, 0), safe_inverse(c.a, 1), safe_inverse(c.c, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
func eq(a, b unit_float) bool { return abs(a-b) <= FLOAT_EQUALITY_THRESHOLD }
|
||||
|
||||
func (c *SplitCurve) IsSRGB() bool {
|
||||
s := SRGBCurve()
|
||||
return eq(s.a, c.a) && eq(s.b, c.b) && eq(s.c, c.c) && eq(s.d, c.d)
|
||||
}
|
||||
|
||||
func (c *SplitCurve) String() string {
|
||||
if c.IsSRGB() {
|
||||
return "SRGBCurve"
|
||||
}
|
||||
return fmt.Sprintf("SplitCurve{a: %v b: %v c: %v d: %v g: %v}", c.a, c.b, c.c, c.d, c.g)
|
||||
}
|
||||
|
||||
func (c *SplitCurve) Transform(x unit_float) unit_float {
|
||||
// Y = (aX+b)^g if X ≥ d else cX
|
||||
if x >= c.d {
|
||||
if e := c.a*x + c.b; e > 0 {
|
||||
return pow(e, c.g)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return c.c * x
|
||||
}
|
||||
|
||||
func (c *SplitCurve) InverseTransform(y unit_float) unit_float {
|
||||
// X=((Y^1/g-b)/a) | Y >= (ad+b)^g
|
||||
// X=Y/c | Y< (ad+b)^g
|
||||
if y < c.threshold {
|
||||
return y * c.inv_c
|
||||
}
|
||||
return (pow(y, c.inv_g) - c.b) * c.inv_a
|
||||
}
|
||||
|
||||
func (c *ComplexCurve) IsSRGB() bool {
|
||||
s := SRGBCurve()
|
||||
return eq(s.a, c.a) && eq(s.b, c.b) && eq(s.c, c.c) && eq(s.d, c.d) && eq(c.e, 0) && eq(c.f, 0)
|
||||
}
|
||||
|
||||
type IsSRGB interface {
|
||||
IsSRGB() bool
|
||||
}
|
||||
|
||||
func (c *ComplexCurve) Prepare() error {
|
||||
c.threshold, c.inv_g, c.inv_a, c.inv_c = pow(c.a*c.d+c.b, c.g)+c.e, safe_inverse(c.g, 0), safe_inverse(c.a, 1), safe_inverse(c.c, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ComplexCurve) String() string {
|
||||
return fmt.Sprintf("ComplexCurve{a: %v b: %v c: %v d: %v e: %v f: %v g: %v}", c.a, c.b, c.c, c.d, c.e, c.f, c.g)
|
||||
}
|
||||
|
||||
func (c *ComplexCurve) Transform(x unit_float) unit_float {
|
||||
// Y = (aX+b)^g + e if X ≥ d else cX+f
|
||||
if x >= c.d {
|
||||
if e := c.a*x + c.b; e > 0 {
|
||||
return pow(e, c.g) + c.e
|
||||
}
|
||||
return c.e
|
||||
}
|
||||
return c.c*x + c.f
|
||||
}
|
||||
|
||||
func (c *ComplexCurve) InverseTransform(y unit_float) unit_float {
|
||||
// X=((Y-e)1/g-b)/a | Y >=(ad+b)^g+e), cd+f
|
||||
// X=(Y-f)/c | else
|
||||
if y < c.threshold {
|
||||
return (y - c.f) * c.inv_c
|
||||
}
|
||||
if e := y - c.e; e > 0 {
|
||||
return (pow(e, c.inv_g) - c.b) * c.inv_a
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var SRGBCurve = sync.OnceValue(func() *SplitCurve {
|
||||
ans := &SplitCurve{g: 2.4, a: 1 / 1.055, b: 0.055 / 1.055, c: 1 / 12.92, d: 0.0031308 * 12.92}
|
||||
ans.Prepare()
|
||||
return ans
|
||||
})
|
||||
|
||||
var SRGBCurveTransformer = sync.OnceValue(func() Curves {
|
||||
return NewCurveTransformer("sRGB curve", SRGBCurve(), SRGBCurve(), SRGBCurve())
|
||||
})
|
||||
var SRGBCurveInverseTransformer = sync.OnceValue(func() Curves {
|
||||
return NewInverseCurveTransformer("TRC", SRGBCurve(), SRGBCurve(), SRGBCurve())
|
||||
})
|
||||
209
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/tags_matrix.go
generated
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
type Translation [3]unit_float
|
||||
type Matrix3 [3][3]unit_float
|
||||
type IdentityMatrix int
|
||||
|
||||
type MatrixWithOffset struct {
|
||||
m ChannelTransformer
|
||||
offset1, offset2, offset3 unit_float
|
||||
}
|
||||
|
||||
func (m MatrixWithOffset) String() string {
|
||||
return fmt.Sprintf("MatrixWithOffset{ %.6v %.6v }", m.m, []unit_float{m.offset1, m.offset2, m.offset3})
|
||||
}
|
||||
|
||||
func is_identity_matrix(m *Matrix3) bool {
|
||||
if m == nil {
|
||||
return true
|
||||
}
|
||||
for r := range 3 {
|
||||
for c := range 3 {
|
||||
q := IfElse(r == c, unit_float(1), unit_float(0))
|
||||
if math.Abs(float64(m[r][c]-q)) > FLOAT_EQUALITY_THRESHOLD {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c Translation) String() string { return fmt.Sprintf("Translation{%.6v}", [3]unit_float(c)) }
|
||||
func (c *Translation) IOSig() (int, int) { return 3, 3 }
|
||||
func (c *Translation) Empty() bool { return c[0] == 0 && c[1] == 0 && c[2] == 0 }
|
||||
func (c *Translation) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *IdentityMatrix) String() string { return "IdentityMatrix" }
|
||||
func (c *IdentityMatrix) IOSig() (int, int) { return 3, 3 }
|
||||
func (c *MatrixWithOffset) IOSig() (int, int) { return 3, 3 }
|
||||
func (c *Matrix3) IOSig() (int, int) { return 3, 3 }
|
||||
func (c *IdentityMatrix) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *MatrixWithOffset) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
func (c *Matrix3) Iter(f func(ChannelTransformer) bool) { f(c) }
|
||||
|
||||
var _ ChannelTransformer = (*MatrixWithOffset)(nil)
|
||||
|
||||
func embeddedMatrixDecoder(body []byte) (any, error) {
|
||||
result := Matrix3{}
|
||||
if len(body) < 36 {
|
||||
return nil, fmt.Errorf("embedded matrix tag too short: %d < 36", len(body))
|
||||
}
|
||||
var m ChannelTransformer = &result
|
||||
for i := range 9 {
|
||||
result[i/3][i%3] = readS15Fixed16BE(body[:4])
|
||||
body = body[4:]
|
||||
}
|
||||
if is_identity_matrix(&result) {
|
||||
t := IdentityMatrix(0)
|
||||
m = &t
|
||||
}
|
||||
if len(body) < 3*4 {
|
||||
return m, nil
|
||||
}
|
||||
r2 := &MatrixWithOffset{m: m}
|
||||
r2.offset1 = readS15Fixed16BE(body[:4])
|
||||
r2.offset2 = readS15Fixed16BE(body[4:8])
|
||||
r2.offset3 = readS15Fixed16BE(body[8:12])
|
||||
if r2.offset1 == 0 && r2.offset2 == 0 && r2.offset3 == 0 {
|
||||
return m, nil
|
||||
}
|
||||
return r2, nil
|
||||
|
||||
}
|
||||
|
||||
func matrixDecoder(raw []byte) (any, error) {
|
||||
if len(raw) < 8+36 {
|
||||
return nil, errors.New("mtx tag too short")
|
||||
}
|
||||
return embeddedMatrixDecoder(raw[8:])
|
||||
}
|
||||
|
||||
func (m *Matrix3) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return m[0][0]*r + m[0][1]*g + m[0][2]*b, m[1][0]*r + m[1][1]*g + m[1][2]*b, m[2][0]*r + m[2][1]*g + m[2][2]*b
|
||||
}
|
||||
func (m *Matrix3) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
func (m *Matrix3) Transpose() Matrix3 {
|
||||
return Matrix3{
|
||||
{m[0][0], m[1][0], m[2][0]},
|
||||
{m[0][1], m[1][1], m[2][1]},
|
||||
{m[0][2], m[1][2], m[2][2]},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Matrix3) Scale(s unit_float) {
|
||||
m[0][0] *= s
|
||||
m[0][1] *= s
|
||||
m[0][2] *= s
|
||||
m[1][0] *= s
|
||||
m[1][1] *= s
|
||||
m[1][2] *= s
|
||||
m[2][0] *= s
|
||||
m[2][1] *= s
|
||||
m[2][2] *= s
|
||||
}
|
||||
|
||||
func Dot(v1, v2 [3]unit_float) unit_float {
|
||||
return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]
|
||||
}
|
||||
|
||||
func (m *Matrix3) Equals(o *Matrix3, threshold unit_float) bool {
|
||||
for r := range 3 {
|
||||
ar, br := m[r], o[r]
|
||||
for c := range 3 {
|
||||
if abs(ar[c]-br[c]) > threshold {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Matrix3) String() string {
|
||||
return fmt.Sprintf("Matrix3{ %.6v, %.6v, %.6v }", m[0], m[1], m[2])
|
||||
}
|
||||
|
||||
func (m *Matrix3) AsMatrix3() *Matrix3 { return m }
|
||||
func NewScalingMatrix3(scale unit_float) *Matrix3 {
|
||||
return &Matrix3{{scale, 0, 0}, {0, scale, 0}, {0, 0, scale}}
|
||||
}
|
||||
func (m *IdentityMatrix) AsMatrix3() *Matrix3 { return NewScalingMatrix3(1) }
|
||||
|
||||
// Return m * o
|
||||
func (m *Matrix3) Multiply(o Matrix3) Matrix3 {
|
||||
t := o.Transpose()
|
||||
return Matrix3{
|
||||
{Dot(t[0], m[0]), Dot(t[1], m[0]), Dot(t[2], m[0])},
|
||||
{Dot(t[0], m[1]), Dot(t[1], m[1]), Dot(t[2], m[1])},
|
||||
{Dot(t[0], m[2]), Dot(t[1], m[2]), Dot(t[2], m[2])},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Matrix3) Inverted() (ans Matrix3, err error) {
|
||||
o := Matrix3{
|
||||
{
|
||||
m[1][1]*m[2][2] - m[2][1]*m[1][2],
|
||||
-(m[0][1]*m[2][2] - m[2][1]*m[0][2]),
|
||||
m[0][1]*m[1][2] - m[1][1]*m[0][2],
|
||||
},
|
||||
{
|
||||
-(m[1][0]*m[2][2] - m[2][0]*m[1][2]),
|
||||
m[0][0]*m[2][2] - m[2][0]*m[0][2],
|
||||
-(m[0][0]*m[1][2] - m[1][0]*m[0][2]),
|
||||
},
|
||||
{
|
||||
m[1][0]*m[2][1] - m[2][0]*m[1][1],
|
||||
-(m[0][0]*m[2][1] - m[2][0]*m[0][1]),
|
||||
m[0][0]*m[1][1] - m[1][0]*m[0][1],
|
||||
},
|
||||
}
|
||||
|
||||
det := m[0][0]*o[0][0] + m[1][0]*o[0][1] + m[2][0]*o[0][2]
|
||||
if abs(det) < FLOAT_EQUALITY_THRESHOLD {
|
||||
return ans, fmt.Errorf("matrix is singular and cannot be inverted, det=%v", det)
|
||||
}
|
||||
det = 1 / det
|
||||
|
||||
o[0][0] *= det
|
||||
o[0][1] *= det
|
||||
o[0][2] *= det
|
||||
o[1][0] *= det
|
||||
o[1][1] *= det
|
||||
o[1][2] *= det
|
||||
o[2][0] *= det
|
||||
o[2][1] *= det
|
||||
o[2][2] *= det
|
||||
return o, nil
|
||||
}
|
||||
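For context, here is a small standalone sketch of plain 3x3 multiplication, equivalent to what Multiply computes via transposed dot products. The sRGB-to-XYZ coefficients appear only as familiar example data, not values taken from this library.

```go
package main

import "fmt"

type mat3 [3][3]float64

// mul returns the matrix product m*o.
func mul(m, o mat3) mat3 {
	var r mat3
	for i := 0; i < 3; i++ {
		for j := 0; j < 3; j++ {
			for k := 0; k < 3; k++ {
				r[i][j] += m[i][k] * o[k][j]
			}
		}
	}
	return r
}

func main() {
	// The standard linear sRGB -> XYZ (D65) matrix, used purely as example data.
	rgb2xyz := mat3{
		{0.4124, 0.3576, 0.1805},
		{0.2126, 0.7152, 0.0722},
		{0.0193, 0.1192, 0.9505},
	}
	ident := mat3{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}
	fmt.Println(mul(rgb2xyz, ident) == rgb2xyz) // true: multiplying by the identity changes nothing
}
```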
|
||||
func (m *Translation) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return r + m[0], g + m[1], b + m[2]
|
||||
}
|
||||
|
||||
func (m *Translation) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
func (m IdentityMatrix) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
return r, g, b
|
||||
}
|
||||
func (m IdentityMatrix) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
|
||||
func (m *MatrixWithOffset) Translation() *Translation {
|
||||
if m.offset1 == 0 && m.offset2 == 0 && m.offset3 == 0 {
|
||||
return nil
|
||||
}
|
||||
return &Translation{m.offset1, m.offset2, m.offset3}
|
||||
}
|
||||
|
||||
func (m *MatrixWithOffset) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
r, g, b = m.m.Transform(r, g, b)
|
||||
r += m.offset1
|
||||
g += m.offset2
|
||||
b += m.offset3
|
||||
return r, g, b
|
||||
}
|
||||
func (m *MatrixWithOffset) TransformGeneral(o, i []unit_float) { tg33(m.Transform, o, i) }
|
||||
171
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/tags_mft.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type MFT struct {
|
||||
in_channels, out_channels int
|
||||
grid_points []int
|
||||
input_curve, output_curve Curves
|
||||
clut CLUT
|
||||
matrix ChannelTransformer
|
||||
is8bit bool
|
||||
}
|
||||
|
||||
func (c MFT) String() string {
|
||||
return fmt.Sprintf("MFT{grid_points:%v, matrix:%v input:%v, clut:%v, output:%v }", c.grid_points, c.matrix, c.input_curve, c.clut, c.output_curve)
|
||||
}
|
||||
|
||||
func (c *MFT) IOSig() (int, int) { return c.in_channels, c.out_channels }
|
||||
func (c *MFT) Iter(f func(ChannelTransformer) bool) {
|
||||
if mo, ok := c.matrix.(*MatrixWithOffset); ok {
|
||||
if _, ok := mo.m.(*IdentityMatrix); !ok {
|
||||
if !f(mo.m) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if tt := mo.Translation(); tt != nil {
|
||||
if !f(tt) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
} else if !f(c.matrix) {
|
||||
return
|
||||
}
|
||||
if !f(c.input_curve) {
|
||||
return
|
||||
}
|
||||
if !f(c.clut) {
|
||||
return
|
||||
}
|
||||
if !f(c.output_curve) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var _ ChannelTransformer = (*MFT)(nil)
|
||||
|
||||
func (mft *MFT) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
// Apply matrix
|
||||
r, g, b = mft.matrix.Transform(r, g, b)
|
||||
// Apply input curves with linear interpolation
|
||||
r, g, b = mft.input_curve.Transform(r, g, b)
|
||||
// Apply CLUT
|
||||
r, g, b = mft.clut.Transform(r, g, b)
|
||||
// Apply output curves with interpolation
|
||||
r, g, b = mft.output_curve.Transform(r, g, b)
|
||||
return r, g, b
|
||||
}
|
||||
|
||||
func (mft *MFT) TransformGeneral(o, i []unit_float) {
|
||||
mft.matrix.TransformGeneral(o, i)
|
||||
// Apply input curves with linear interpolation
|
||||
mft.input_curve.TransformGeneral(o, i)
|
||||
// Apply CLUT
|
||||
mft.clut.TransformGeneral(o, i)
|
||||
// Apply output curves with interpolation
|
||||
mft.output_curve.TransformGeneral(o, i)
|
||||
}
|
||||
|
||||
func load_8bit_table(raw []byte, n int) (output []unit_float, leftover []byte, err error) {
|
||||
if len(raw) < n {
|
||||
return nil, raw, fmt.Errorf("mft2 tag too short")
|
||||
}
|
||||
output = make([]unit_float, n)
|
||||
for i := range n {
|
||||
output[i] = unit_float(raw[0]) / 255
|
||||
raw = raw[1:]
|
||||
}
|
||||
return output, raw, nil
|
||||
}
|
||||
|
||||
func load_16bit_table(raw []byte, n int) (output []unit_float, leftover []byte, err error) {
|
||||
if len(raw) < 2*n {
|
||||
return nil, raw, fmt.Errorf("mft2 tag too short")
|
||||
}
|
||||
output = make([]unit_float, n)
|
||||
for i := range n {
|
||||
output[i] = unit_float(binary.BigEndian.Uint16(raw[:2])) / 65535
|
||||
raw = raw[2:]
|
||||
}
|
||||
return output, raw, nil
|
||||
}
|
||||
|
||||
func load_mft_header(raw []byte) (ans *MFT, leftover []byte, err error) {
|
||||
if len(raw) < 48 {
|
||||
return nil, raw, errors.New("mft tag too short")
|
||||
}
|
||||
a := MFT{}
|
||||
var grid_points int
|
||||
a.in_channels, a.out_channels, grid_points = int(raw[8]), int(raw[9]), int(raw[10])
|
||||
if grid_points < 2 {
|
||||
return nil, raw, fmt.Errorf("mft tag has invalid number of CLUT grid points: %d", a.grid_points)
|
||||
}
|
||||
a.grid_points = make([]int, a.in_channels)
|
||||
for i := range a.in_channels {
|
||||
a.grid_points[i] = grid_points
|
||||
}
|
||||
ma, err := embeddedMatrixDecoder(raw[12:48])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
a.matrix = ma.(ChannelTransformer)
|
||||
return &a, raw[48:], nil
|
||||
}
|
||||
|
||||
func load_mft_body(a *MFT, raw []byte, load_table func([]byte, int) ([]unit_float, []byte, error), input_table_entries, output_table_entries int, input_colorspace, output_colorspace ColorSpace, bytes_per_channel int) (err error) {
|
||||
input_curves := make([]Curve1D, a.in_channels)
|
||||
output_curves := make([]Curve1D, a.out_channels)
|
||||
var fp []unit_float
|
||||
for i := range a.in_channels {
|
||||
if fp, raw, err = load_table(raw, input_table_entries); err != nil {
|
||||
return err
|
||||
}
|
||||
if input_curves[i], err = load_points_curve(fp); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
a.input_curve = NewCurveTransformer("Input", input_curves...)
|
||||
fp, consumed, err := decode_clut_table(raw, int(bytes_per_channel), a.out_channels, a.grid_points, output_colorspace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
raw = raw[consumed:]
|
||||
a.clut = make_clut(a.grid_points, a.in_channels, a.out_channels, fp, true, false)
|
||||
for i := range a.out_channels {
|
||||
if fp, raw, err = load_table(raw, output_table_entries); err != nil {
|
||||
return err
|
||||
}
|
||||
if output_curves[i], err = load_points_curve(fp); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
a.output_curve = NewCurveTransformer("Output", output_curves...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decode_mft8(raw []byte, input_colorspace, output_colorspace ColorSpace) (ans any, err error) {
|
||||
var a *MFT
|
||||
if a, raw, err = load_mft_header(raw); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.is8bit = true
|
||||
err = load_mft_body(a, raw, load_8bit_table, 256, 256, input_colorspace, output_colorspace, 1)
|
||||
return a, err
|
||||
}
|
||||
|
||||
func decode_mft16(raw []byte, input_colorspace, output_colorspace ColorSpace) (ans any, err error) {
|
||||
var a *MFT
|
||||
if a, raw, err = load_mft_header(raw); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
input_table_entries, output_table_entries := binary.BigEndian.Uint16(raw[:2]), binary.BigEndian.Uint16(raw[2:4])
|
||||
err = load_mft_body(a, raw[4:], load_16bit_table, int(input_table_entries), int(output_table_entries), input_colorspace, output_colorspace, 2)
|
||||
return a, err
|
||||
}
|
||||
180
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/tags_modular.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// ModularTag represents a modular tag, sections 10.12 and 10.13 of ICC.1-2022-05.pdf
|
||||
type ModularTag struct {
|
||||
num_input_channels, num_output_channels int
|
||||
a_curves, m_curves, b_curves []Curve1D
|
||||
clut, matrix ChannelTransformer
|
||||
transform_objects []ChannelTransformer
|
||||
is_a_to_b bool
|
||||
}
|
||||
|
||||
func (m ModularTag) String() string {
|
||||
return fmt.Sprintf("%s{ %s }", IfElse(m.is_a_to_b, "mAB", "mBA"), transformers_as_string(m.transform_objects...))
|
||||
}
|
||||
|
||||
var _ ChannelTransformer = (*ModularTag)(nil)
|
||||
|
||||
func (m *ModularTag) Iter(f func(ChannelTransformer) bool) {
|
||||
for _, c := range m.transform_objects {
|
||||
if !f(c) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModularTag) IOSig() (i int, o int) {
|
||||
i, _ = m.transform_objects[0].IOSig()
|
||||
_, o = m.transform_objects[len(m.transform_objects)-1].IOSig()
|
||||
return
|
||||
}
|
||||
|
||||
func (m *ModularTag) IsSuitableFor(num_input_channels, num_output_channels int) bool {
|
||||
return m.num_input_channels == num_input_channels && m.num_output_channels == num_output_channels
|
||||
}
|
||||
func (m *ModularTag) Transform(r, g, b unit_float) (unit_float, unit_float, unit_float) {
|
||||
for _, t := range m.transform_objects {
|
||||
r, g, b = t.Transform(r, g, b)
|
||||
}
|
||||
return r, g, b
|
||||
}
|
||||
|
||||
func (m *ModularTag) TransformGeneral(o, i []unit_float) {
|
||||
for _, t := range m.transform_objects {
|
||||
t.TransformGeneral(o, i)
|
||||
}
|
||||
}
|
||||
|
||||
func IfElse[T any](condition bool, if_val T, else_val T) T {
|
||||
if condition {
|
||||
return if_val
|
||||
}
|
||||
return else_val
|
||||
}
|
||||
|
||||
func modularDecoder(raw []byte, _, output_colorspace ColorSpace) (ans any, err error) {
|
||||
if len(raw) < 40 {
|
||||
return nil, errors.New("modular (mAB/mBA) tag too short")
|
||||
}
|
||||
var s Signature
|
||||
_, _ = binary.Decode(raw[:4], binary.BigEndian, &s)
|
||||
is_a_to_b := false
|
||||
switch s {
|
||||
case LutAtoBTypeSignature:
|
||||
is_a_to_b = true
|
||||
case LutBtoATypeSignature:
|
||||
is_a_to_b = false
|
||||
default:
|
||||
return nil, fmt.Errorf("modular tag has unknown signature: %s", s)
|
||||
}
|
||||
inputCh, outputCh := int(raw[8]), int(raw[9])
|
||||
var offsets [5]uint32
|
||||
if _, err := binary.Decode(raw[12:], binary.BigEndian, offsets[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, matrix, m, clut, a := offsets[0], offsets[1], offsets[2], offsets[3], offsets[4]
|
||||
mt := &ModularTag{num_input_channels: inputCh, num_output_channels: outputCh, is_a_to_b: is_a_to_b}
|
||||
read_curves := func(offset uint32, num_curves_reqd int) (ans []Curve1D, err error) {
|
||||
if offset == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if int(offset)+8 > len(raw) {
|
||||
return nil, errors.New("modular (mAB/mBA) tag too short")
|
||||
}
|
||||
block := raw[offset:]
|
||||
var c any
|
||||
var consumed int
|
||||
for range inputCh {
|
||||
if len(block) < 4 {
|
||||
return nil, errors.New("modular (mAB/mBA) tag too short")
|
||||
}
|
||||
sig := Signature(binary.BigEndian.Uint32(block[:4]))
|
||||
switch sig {
|
||||
case CurveTypeSignature:
|
||||
c, consumed, err = embeddedCurveDecoder(block)
|
||||
case ParametricCurveTypeSignature:
|
||||
c, consumed, err = embeddedParametricCurveDecoder(block)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown curve type: %s in modularDecoder", sig)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block = block[consumed:]
|
||||
ans = append(ans, c.(Curve1D))
|
||||
}
|
||||
if len(ans) != num_curves_reqd {
|
||||
return nil, fmt.Errorf("number of curves in modular tag: %d does not match the number of channels: %d", len(ans), num_curves_reqd)
|
||||
}
|
||||
return
|
||||
}
|
||||
if mt.b_curves, err = read_curves(b, IfElse(is_a_to_b, outputCh, inputCh)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if mt.a_curves, err = read_curves(a, IfElse(is_a_to_b, inputCh, outputCh)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if mt.m_curves, err = read_curves(m, outputCh); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var temp any
|
||||
if clut > 0 {
|
||||
if temp, err = embeddedClutDecoder(raw[clut:], inputCh, outputCh, output_colorspace, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mt.clut = temp.(ChannelTransformer)
|
||||
}
|
||||
if matrix > 0 {
|
||||
if temp, err = embeddedMatrixDecoder(raw[matrix:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, is_identity_matrix := temp.(*IdentityMatrix); !is_identity_matrix {
|
||||
mt.matrix = temp.(ChannelTransformer)
|
||||
}
|
||||
}
|
||||
ans = mt
|
||||
add_curves := func(name string, c []Curve1D) {
|
||||
if len(c) > 0 {
|
||||
has_non_identity := false
|
||||
for _, x := range c {
|
||||
if _, ok := x.(*IdentityCurve); !ok {
|
||||
has_non_identity = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if has_non_identity {
|
||||
nc := NewCurveTransformer(name, c...)
|
||||
mt.transform_objects = append(mt.transform_objects, nc)
|
||||
}
|
||||
}
|
||||
}
|
||||
add_curves("A", mt.a_curves)
|
||||
if mt.clut != nil {
|
||||
mt.transform_objects = append(mt.transform_objects, mt.clut)
|
||||
}
|
||||
add_curves("M", mt.m_curves)
|
||||
if mt.matrix != nil {
|
||||
if mo, ok := mt.matrix.(*MatrixWithOffset); ok {
|
||||
if _, ok := mo.m.(*IdentityMatrix); !ok {
|
||||
mt.transform_objects = append(mt.transform_objects, mo.m)
|
||||
}
|
||||
if tt := mo.Translation(); tt != nil {
|
||||
mt.transform_objects = append(mt.transform_objects, tt)
|
||||
}
|
||||
} else {
|
||||
mt.transform_objects = append(mt.transform_objects, mt.matrix)
|
||||
}
|
||||
}
|
||||
add_curves("B", mt.b_curves)
|
||||
if !is_a_to_b {
|
||||
slices.Reverse(mt.transform_objects)
|
||||
}
|
||||
return
|
||||
}
|
||||
240
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/tagtable.go
generated
vendored
@@ -1,6 +1,7 @@
|
||||
package icc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
@@ -18,20 +19,105 @@ type unsupported struct {
|
||||
}
|
||||
|
||||
func (e *unsupported) Error() string {
|
||||
return fmt.Sprintf("the tag: %s is not supported", e.sig)
|
||||
return fmt.Sprintf("the tag: %s (0x%x) is not supported", e.sig, uint32(e.sig))
|
||||
}
|
||||
|
||||
func parse_tag(sig Signature, data []byte) (result any, err error) {
|
||||
type XYZType struct{ X, Y, Z unit_float }
|
||||
|
||||
func xyz_type(data []byte) XYZType {
|
||||
return XYZType{readS15Fixed16BE(data[:4]), readS15Fixed16BE(data[4:8]), readS15Fixed16BE(data[8:12])}
|
||||
}
|
||||
|
||||
func f(t unit_float) unit_float {
|
||||
const Limit = (24.0 / 116.0) * (24.0 / 116.0) * (24.0 / 116.0)
|
||||
|
||||
if t <= Limit {
|
||||
return (841.0/108.0)*t + (16.0 / 116.0)
|
||||
}
|
||||
return pow(t, 1.0/3.0)
|
||||
}
|
||||
|
||||
func f_1(t unit_float) unit_float {
|
||||
const Limit = (24.0 / 116.0)
|
||||
|
||||
if t <= Limit {
|
||||
return (108.0 / 841.0) * (t - (16.0 / 116.0))
|
||||
}
|
||||
|
||||
return t * t * t
|
||||
}
|
||||
|
||||
func (wt *XYZType) Lab_to_XYZ(l, a, b unit_float) (x, y, z unit_float) {
|
||||
y = (l + 16.0) / 116.0
|
||||
x = y + 0.002*a
|
||||
z = y - 0.005*b
|
||||
|
||||
x = f_1(x) * wt.X
|
||||
y = f_1(y) * wt.Y
|
||||
z = f_1(z) * wt.Z
|
||||
return
|
||||
}
|
||||
|
||||
func (wt *XYZType) XYZ_to_Lab(x, y, z unit_float) (l, a, b unit_float) {
|
||||
fx := f(x / wt.X)
|
||||
fy := f(y / wt.Y)
|
||||
fz := f(z / wt.Z)
|
||||
|
||||
l = 116.0*fy - 16.0
|
||||
a = 500.0 * (fx - fy)
|
||||
b = 200.0 * (fy - fz)
|
||||
return
|
||||
}
|
||||
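A standalone round-trip sketch of these Lab/XYZ conversions, using the ICC D50 PCS white point. The grey level 0.18 is an arbitrary illustrative input; the helper names are not the library's.

```go
package main

import (
	"fmt"
	"math"
)

// f and finv are the CIE Lab companding functions used above, in the 24/116 threshold form.
func f(t float64) float64 {
	const limit = (24.0 / 116.0) * (24.0 / 116.0) * (24.0 / 116.0)
	if t <= limit {
		return (841.0/108.0)*t + 16.0/116.0
	}
	return math.Cbrt(t)
}

func finv(t float64) float64 {
	const limit = 24.0 / 116.0
	if t <= limit {
		return (108.0 / 841.0) * (t - 16.0/116.0)
	}
	return t * t * t
}

func main() {
	// ICC PCS white point (D50), as X, Y, Z.
	wx, wy, wz := 0.9642, 1.0, 0.8249

	// XYZ -> Lab for a mid grey (Y = 0.18).
	x, y, z := 0.18*wx, 0.18, 0.18*wz
	fx, fy, fz := f(x/wx), f(y/wy), f(z/wz)
	l, a, b := 116*fy-16, 500*(fx-fy), 200*(fy-fz)
	fmt.Printf("Lab = %.3f %.3f %.3f\n", l, a, b) // L ≈ 49.5, a ≈ 0, b ≈ 0

	// Lab -> XYZ recovers the original values.
	fy2 := (l + 16) / 116
	fx2 := fy2 + a/500
	fz2 := fy2 - b/200
	fmt.Printf("XYZ = %.4f %.4f %.4f\n", finv(fx2)*wx, finv(fy2)*wy, finv(fz2)*wz)
}
```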
|
||||
func decode_xyz(data []byte) (ans any, err error) {
if len(data) < 20 {
return nil, fmt.Errorf("xyz tag too short")
}
a := xyz_type(data[8:])
return &a, nil
}

func decode_array(data []byte) (ans any, err error) {
data = data[8:]
a := make([]unit_float, len(data)/4)
for i := range a {
a[i] = readS15Fixed16BE(data[:4:4])
data = data[4:]
}
return a, nil
}
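
decode_array and xyz_type both lean on readS15Fixed16BE, whose body is outside this hunk; ICC s15Fixed16 values are big-endian signed 16.16 fixed point, so a plausible standalone sketch of that conversion (an assumption, not the vendored implementation) is:

package main

import (
	"encoding/binary"
	"fmt"
)

// s15Fixed16 reads 4 big-endian bytes as a signed 16.16 fixed-point value.
func s15Fixed16(b []byte) float64 {
	return float64(int32(binary.BigEndian.Uint32(b[:4]))) / 65536.0
}

func main() {
	fmt.Println(s15Fixed16([]byte{0x00, 0x01, 0x00, 0x00})) // 1
	fmt.Println(s15Fixed16([]byte{0xff, 0xff, 0x00, 0x00})) // -1
}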
func parse_tag(sig Signature, data []byte, input_colorspace, output_colorspace ColorSpace) (result any, err error) {
|
||||
if len(data) == 0 {
|
||||
return nil, &not_found{sig}
|
||||
}
|
||||
switch sig {
|
||||
default:
|
||||
if len(data) < 4 {
|
||||
return nil, &unsupported{sig}
|
||||
case DescSignature, DeviceManufacturerDescriptionSignature, DeviceModelDescriptionSignature:
|
||||
}
|
||||
s := signature(data)
|
||||
switch s {
|
||||
default:
|
||||
return nil, &unsupported{s}
|
||||
case DescSignature, DeviceManufacturerDescriptionSignature, DeviceModelDescriptionSignature, MultiLocalisedUnicodeSignature, TextTagSignature:
|
||||
return parse_text_tag(data)
|
||||
case SignateTagSignature:
|
||||
return sigDecoder(data)
|
||||
case MatrixElemTypeSignature:
|
||||
return matrixDecoder(data)
|
||||
case LutAtoBTypeSignature, LutBtoATypeSignature:
|
||||
return modularDecoder(data, input_colorspace, output_colorspace)
|
||||
case Lut16TypeSignature:
|
||||
return decode_mft16(data, input_colorspace, output_colorspace)
|
||||
case Lut8TypeSignature:
|
||||
return decode_mft8(data, input_colorspace, output_colorspace)
|
||||
case XYZTypeSignature:
|
||||
return decode_xyz(data)
|
||||
case S15Fixed16ArrayTypeSignature:
|
||||
return decode_array(data)
|
||||
case CurveTypeSignature:
|
||||
return curveDecoder(data)
|
||||
case ParametricCurveTypeSignature:
|
||||
return parametricCurveDecoder(data)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,34 +126,59 @@ type parsed_tag struct {
|
||||
err error
|
||||
}
|
||||
|
||||
type raw_tag_entry struct {
|
||||
offset int
|
||||
data []byte
|
||||
}
|
||||
|
||||
type parse_cache_key struct {
|
||||
offset, size int
|
||||
}
|
||||
|
||||
type TagTable struct {
|
||||
entries map[Signature][]byte
|
||||
lock sync.Mutex
|
||||
parsed map[Signature]parsed_tag
|
||||
entries map[Signature]raw_tag_entry
|
||||
lock sync.Mutex
|
||||
parsed map[Signature]parsed_tag
|
||||
parse_cache map[parse_cache_key]parsed_tag
|
||||
}
|
||||
|
||||
func (t *TagTable) add(sig Signature, data []byte) {
t.entries[sig] = data
func (t *TagTable) Has(sig Signature) bool {
return t.entries[sig].data != nil
}

func (t *TagTable) get_parsed(sig Signature) (ans any, err error) {
func (t *TagTable) add(sig Signature, offset int, data []byte) {
t.entries[sig] = raw_tag_entry{offset, data}
}

func (t *TagTable) get_parsed(sig Signature, input_colorspace, output_colorspace ColorSpace) (ans any, err error) {
t.lock.Lock()
defer t.lock.Unlock()
if t.parsed == nil {
t.parsed = make(map[Signature]parsed_tag)
t.parse_cache = make(map[parse_cache_key]parsed_tag)
}
existing, found := t.parsed[sig]
if found {
return existing.tag, existing.err
}
if t.parsed == nil {
t.parsed = make(map[Signature]parsed_tag)
}
var key parse_cache_key
defer func() {
t.parsed[sig] = parsed_tag{ans, err}
t.parse_cache[key] = parsed_tag{ans, err}
}()
return parse_tag(sig, t.entries[sig])
re := t.entries[sig]
if re.data == nil {
return nil, &not_found{sig}
}
key = parse_cache_key{re.offset, len(re.data)}
if cached, ok := t.parse_cache[key]; ok {
return cached.tag, cached.err
}
return parse_tag(sig, re.data, input_colorspace, output_colorspace)
}
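
get_parsed now caches at two levels: by tag signature, and by the (offset, size) of the raw bytes, so two signatures in the tag table that point at the same byte range are parsed only once. A small sketch of that aliasing idea with hypothetical signatures and a string stand-in for the parsed value:

package main

import "fmt"

type cacheKey struct{ offset, size int }

func main() {
	// Two tag signatures that alias the same raw bytes in the profile.
	entries := map[string]cacheKey{"A2B0": {128, 64}, "A2B1": {128, 64}}
	cache := map[cacheKey]string{}
	parses := 0
	get := func(sig string) string {
		k := entries[sig]
		if v, ok := cache[k]; ok {
			return v
		}
		parses++
		v := fmt.Sprintf("parsed bytes at offset %d", k.offset)
		cache[k] = v
		return v
	}
	get("A2B0")
	get("A2B1")
	fmt.Println(parses) // 1: the second signature reuses the cached parse
}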
func (t *TagTable) getDescription(s Signature) (string, error) {
|
||||
q, err := t.get_parsed(s)
|
||||
q, err := t.get_parsed(s, ColorSpaceRGB, ColorSpaceXYZ)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not get description for %s with error: %w", s, err)
|
||||
}
|
||||
@@ -90,14 +201,97 @@ func (t *TagTable) getDeviceModelDescription() (string, error) {
|
||||
return t.getDescription(DeviceModelDescriptionSignature)
|
||||
}
|
||||
|
||||
func emptyTagTable() TagTable {
|
||||
return TagTable{
|
||||
entries: make(map[Signature][]byte),
|
||||
func (t *TagTable) load_curve_tag(s Signature) (Curve1D, error) {
|
||||
r, err := t.get_parsed(s, ColorSpaceRGB, ColorSpaceXYZ)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not load %s tag from profile with error: %w", s, err)
|
||||
}
|
||||
if ans, ok := r.(Curve1D); !ok {
|
||||
return nil, fmt.Errorf("could not load %s tag from profile as it is of unsupported type: %T", s, r)
|
||||
} else {
|
||||
if _, ok := r.(*IdentityCurve); ok {
|
||||
return nil, nil
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
}
|
||||
|
||||
type ChannelTransformer interface {
|
||||
Transform(output, workspace []float64, input ...float64) error
|
||||
IsSuitableFor(num_input_channels int, num_output_channels int) bool
|
||||
WorkspaceSize() int
|
||||
func (t *TagTable) load_rgb_matrix(forward bool) (ans *Matrix3, err error) {
|
||||
r, err := t.get_parsed(RedMatrixColumnTagSignature, ColorSpaceRGB, ColorSpaceXYZ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
g, err := t.get_parsed(GreenMatrixColumnTagSignature, ColorSpaceRGB, ColorSpaceXYZ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := t.get_parsed(BlueMatrixColumnTagSignature, ColorSpaceRGB, ColorSpaceXYZ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rc, bc, gc := r.(*XYZType), g.(*XYZType), b.(*XYZType)
|
||||
var m Matrix3
|
||||
m[0][0], m[0][1], m[0][2] = rc.X, bc.X, gc.X
|
||||
m[1][0], m[1][1], m[1][2] = rc.Y, bc.Y, gc.Y
|
||||
m[2][0], m[2][1], m[2][2] = rc.Z, bc.Z, gc.Z
|
||||
// stored in 2.15 format so need to scale, see
|
||||
// BuildRGBInputMatrixShaper in lcms
|
||||
m.Scale(MAX_ENCODEABLE_XYZ_INVERSE)
|
||||
|
||||
if is_identity_matrix(&m) {
|
||||
return nil, nil
|
||||
}
|
||||
if forward {
|
||||
return &m, nil
|
||||
}
|
||||
inv, err := m.Inverted()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("the colorspace conversion matrix is not invertible: %w", err)
|
||||
}
|
||||
return &inv, nil
|
||||
}
|
||||
|
||||
func array_to_matrix(a []unit_float) *Matrix3 {
|
||||
_ = a[8]
|
||||
m := Matrix3{}
|
||||
copy(m[0][:], a[:3])
|
||||
copy(m[1][:], a[3:6])
|
||||
copy(m[2][:], a[6:9])
|
||||
if is_identity_matrix(&m) {
|
||||
return nil
|
||||
}
|
||||
return &m
|
||||
}
|
||||
|
||||
func (p *TagTable) get_chromatic_adaption() (*Matrix3, error) {
|
||||
x, err := p.get_parsed(ChromaticAdaptationTagSignature, ColorSpaceRGB, ColorSpaceXYZ)
|
||||
if err != nil {
|
||||
var nf *not_found
|
||||
if errors.As(err, &nf) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
a, ok := x.([]unit_float)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("chad tag is not an ArrayType")
|
||||
}
|
||||
return array_to_matrix(a), nil
|
||||
}
|
||||
|
||||
func emptyTagTable() TagTable {
|
||||
return TagTable{
|
||||
entries: make(map[Signature]raw_tag_entry),
|
||||
}
|
||||
}
|
||||
|
||||
type Debug_callback = func(r, g, b, x, y, z unit_float, t ChannelTransformer)
|
||||
|
||||
type ChannelTransformer interface {
|
||||
Transform(r, g, b unit_float) (x, y, z unit_float)
|
||||
TransformGeneral(out, in []unit_float)
|
||||
IOSig() (num_inputs, num_outputs int)
|
||||
// Should yield only itself unless it is a container, in which case it should yield its contained transforms
|
||||
Iter(func(ChannelTransformer) bool)
|
||||
String() string
|
||||
}
|
||||
|
||||
BIN
vendor/github.com/kovidgoyal/imaging/prism/meta/icc/test-profiles/sRGB-v4.icc
generated
vendored
Normal file
Binary file not shown.
3
vendor/github.com/kovidgoyal/imaging/prism/meta/imageformat.go
generated
vendored
@@ -1,3 +0,0 @@
|
||||
package meta
|
||||
|
||||
type ImageFormat string
|
||||
17
vendor/github.com/kovidgoyal/imaging/prism/meta/jpegmeta/jpegmeta.go
generated
vendored
@@ -4,15 +4,14 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/kovidgoyal/go-parallel"
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/streams"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
// Format specifies the image format handled by this package
|
||||
var Format = meta.ImageFormat("JPEG")
|
||||
|
||||
const exifSignature = "Exif\x00\x00"
|
||||
|
||||
var iccProfileIdentifier = []byte("ICC_PROFILE\x00")
|
||||
@@ -24,9 +23,6 @@ var iccProfileIdentifier = []byte("ICC_PROFILE\x00")
|
||||
// reading from it will produce the same results as fully reading the input
|
||||
// stream. This provides a convenient way to load the full image after loading
|
||||
// the metadata.
|
||||
//
|
||||
// An error is returned if basic metadata could not be extracted. The returned
|
||||
// stream still provides the full image data.
|
||||
func Load(r io.Reader) (md *meta.Data, imgStream io.Reader, err error) {
|
||||
imgStream, err = streams.CallbackWithSeekable(r, func(r io.Reader) (err error) {
|
||||
md, err = ExtractMetadata(r)
|
||||
@@ -38,7 +34,7 @@ func Load(r io.Reader) (md *meta.Data, imgStream io.Reader, err error) {
|
||||
// Same as Load() except that no new stream is provided
|
||||
func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
metadataExtracted := false
|
||||
md = &meta.Data{Format: Format}
|
||||
md = &meta.Data{Format: types.JPEG}
|
||||
segReader := NewSegmentReader(r)
|
||||
|
||||
defer func() {
|
||||
@@ -63,10 +59,13 @@ func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
|
||||
soiSegment, err := segReader.ReadSegment()
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "invalid marker identifier") {
|
||||
err = nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if soiSegment.Marker.Type != markerTypeStartOfImage {
|
||||
return nil, fmt.Errorf("stream does not begin with start-of-image")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
parseSegments:
|
||||
@@ -145,7 +144,7 @@ parseSegments:
|
||||
if !metadataExtracted {
|
||||
return nil, fmt.Errorf("no metadata found")
|
||||
}
|
||||
md.ExifData = exif
|
||||
md.SetExifData(exif)
|
||||
|
||||
// Incomplete or missing ICC profile
|
||||
if len(iccProfileChunks) != iccProfileChunksExtracted {
|
||||
|
||||
28
vendor/github.com/kovidgoyal/imaging/prism/meta/netpbmmeta/netpbmmeta.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package netpbmmeta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/kovidgoyal/imaging/netpbm"
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/prism/meta/tiffmeta"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
c, fmt, err := netpbm.DecodeConfigAndFormat(r)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "unsupported netPBM format") {
|
||||
err = nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
md = &meta.Data{
|
||||
Format: fmt, PixelWidth: uint32(c.Width), PixelHeight: uint32(c.Height),
|
||||
BitsPerComponent: tiffmeta.BitsPerComponent(c.ColorModel),
|
||||
}
|
||||
return md, nil
|
||||
}
|
||||
13
vendor/github.com/kovidgoyal/imaging/prism/meta/pngmeta/chunktypes.go
generated
vendored
@@ -1,6 +1,11 @@
package pngmeta

var chunkTypeiCCP = [4]byte{'i', 'C', 'C', 'P'}
var chunkTypeIDAT = [4]byte{'I', 'D', 'A', 'T'}
var chunkTypeIEND = [4]byte{'I', 'E', 'N', 'D'}
var chunkTypeIHDR = [4]byte{'I', 'H', 'D', 'R'}
const (
chunkTypeiCCP = "iCCP"
chunkTypeIDAT = "IDAT"
chunkTypeIEND = "IEND"
chunkTypeIHDR = "IHDR"
chunkTypeacTL = "acTL"
chunkTypeeXIf = "eXIf"
chunkTypecICP = "cICP"
)
|
||||
|
||||
52
vendor/github.com/kovidgoyal/imaging/prism/meta/pngmeta/pngmeta.go
generated
vendored
@@ -7,14 +7,13 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"unsafe"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/streams"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
// Format specifies the image format handled by this package
|
||||
var Format = meta.ImageFormat("PNG")
|
||||
|
||||
var pngSignature = [8]byte{0x89, 'P', 'N', 'G', 0x0D, 0x0A, 0x1A, 0x0A}
|
||||
|
||||
// Load loads the metadata for a PNG image stream.
|
||||
@@ -49,7 +48,7 @@ func skip_chunk(r io.Reader, length uint32) (err error) {
|
||||
// Same as Load() except that no new stream is provided
|
||||
func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
metadataExtracted := false
|
||||
md = &meta.Data{Format: Format}
|
||||
md = &meta.Data{Format: types.PNG}
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -59,10 +58,10 @@ func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
err = fmt.Errorf("panic while extracting image metadata: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
found_exif := false
|
||||
allMetadataExtracted := func() bool {
|
||||
iccData, iccErr := md.ICCProfileData()
|
||||
return metadataExtracted && (iccData != nil || iccErr != nil)
|
||||
return metadataExtracted && md.HasFrames && found_exif && (iccData != nil || iccErr != nil) && md.CICP.IsSet
|
||||
}
|
||||
|
||||
pngSig := [8]byte{}
|
||||
@@ -70,7 +69,7 @@ func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
return nil, err
|
||||
}
|
||||
if pngSig != pngSignature {
|
||||
return nil, fmt.Errorf("invalid PNG signature")
|
||||
return nil, nil
|
||||
}
|
||||
var chunk []byte
|
||||
|
||||
@@ -93,7 +92,7 @@ parseChunks:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch ch.ChunkType {
|
||||
switch unsafe.String(unsafe.SliceData(ch.ChunkType[:]), 4) {
|
||||
|
||||
case chunkTypeIHDR:
|
||||
if chunk, err = read_chunk(r, ch.Length); err != nil {
|
||||
@@ -111,6 +110,43 @@ parseChunks:
|
||||
break parseChunks
|
||||
}
|
||||
|
||||
case chunkTypeeXIf:
|
||||
if chunk, err = read_chunk(r, ch.Length); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
found_exif = true
|
||||
md.SetExifData(chunk)
|
||||
if allMetadataExtracted() {
|
||||
break parseChunks
|
||||
}
|
||||
|
||||
case chunkTypecICP:
|
||||
if chunk, err = read_chunk(r, ch.Length); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
md.CICP.ColorPrimaries, md.CICP.TransferCharacteristics = chunk[0], chunk[1]
|
||||
md.CICP.MatrixCoefficients, md.CICP.VideoFullRange = chunk[2], chunk[3]
|
||||
md.CICP.IsSet = true
|
||||
if allMetadataExtracted() {
|
||||
break parseChunks
|
||||
}
|
||||
case chunkTypeacTL:
|
||||
if chunk, err = read_chunk(r, ch.Length); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
md.HasFrames = true
|
||||
var num_frames, num_plays uint32
|
||||
if err = decode(&num_frames); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = decode(&num_plays); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
md.NumFrames, md.NumPlays = int(num_frames), int(num_plays)
|
||||
if allMetadataExtracted() {
|
||||
break parseChunks
|
||||
}
|
||||
|
||||
case chunkTypeiCCP:
|
||||
if chunk, err = read_chunk(r, ch.Length); err != nil {
|
||||
return nil, err
|
||||
|
||||
73
vendor/github.com/kovidgoyal/imaging/prism/meta/tiffmeta/tiffmeta.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
package tiffmeta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"image/color"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
"github.com/rwcarlsen/goexif/exif"
|
||||
"golang.org/x/image/tiff"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
func BitsPerComponent(c color.Model) uint32 {
switch c {
case color.RGBAModel, color.NRGBAModel, color.YCbCrModel, color.CMYKModel:
return 8
case color.GrayModel:
return 8
case color.Gray16Model:
return 16
case color.AlphaModel:
return 8
case color.Alpha16Model:
return 16
default:
// This handles paletted images and other custom color models.
// For a palette, each color in the palette has its own depth.
// We can check the bit depth by converting a color from the model to RGBA.
// The `Convert` method is part of the color.Model interface.
// A fully opaque red color is used for this check.
r, g, b, a := c.Convert(color.RGBA{R: 255, A: 255}).RGBA()

// The values returned by RGBA() are 16-bit alpha-premultiplied values (0-65535).
// If the highest value is <= 255, it's an 8-bit model.
if r|g|b|a <= 0xff {
return 8
} else {
return 16
}
}
}
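
A quick usage sketch of the exported BitsPerComponent for the explicitly listed models (the default branch is a heuristic for palettes and custom models), assuming the module path as vendored here:

package main

import (
	"fmt"
	"image/color"

	"github.com/kovidgoyal/imaging/prism/meta/tiffmeta"
)

func main() {
	fmt.Println(tiffmeta.BitsPerComponent(color.GrayModel))   // 8
	fmt.Println(tiffmeta.BitsPerComponent(color.Gray16Model)) // 16
}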
func ExtractMetadata(r_ io.Reader) (md *meta.Data, err error) {
|
||||
r := r_.(io.ReadSeeker)
|
||||
pos, err := r.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := tiff.DecodeConfig(r)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "malformed header") {
|
||||
err = nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
md = &meta.Data{
|
||||
Format: types.TIFF, PixelWidth: uint32(c.Width), PixelHeight: uint32(c.Height),
|
||||
BitsPerComponent: BitsPerComponent(c.ColorModel),
|
||||
}
|
||||
if _, err = r.Seek(pos, io.SeekStart); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if e, err := exif.Decode(r); err == nil {
|
||||
md.SetExif(e)
|
||||
} else {
|
||||
md.SetExifError(err)
|
||||
}
|
||||
return md, nil
|
||||
}
|
||||
1
vendor/github.com/kovidgoyal/imaging/prism/meta/webpmeta/chunktypes.go
generated
vendored
@@ -7,4 +7,5 @@ var (
|
||||
chunkTypeVP8L = [4]byte{'V', 'P', '8', 'L'}
|
||||
chunkTypeVP8X = [4]byte{'V', 'P', '8', 'X'}
|
||||
chunkTypeICCP = [4]byte{'I', 'C', 'C', 'P'}
|
||||
chunkTypeEXIF = [4]byte{'E', 'X', 'I', 'F'}
|
||||
)
|
||||
|
||||
BIN
vendor/github.com/kovidgoyal/imaging/prism/meta/webpmeta/orientation_2.webp
generated
vendored
Normal file
Binary file not shown.
66
vendor/github.com/kovidgoyal/imaging/prism/meta/webpmeta/webpmeta.go
generated
vendored
@@ -7,11 +7,9 @@ import (
|
||||
|
||||
"github.com/kovidgoyal/imaging/prism/meta"
|
||||
"github.com/kovidgoyal/imaging/streams"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
// Format specifies the image format handled by this package
|
||||
var Format = meta.ImageFormat("WebP")
|
||||
|
||||
// Signature is FourCC bytes in the RIFF chunk, "RIFF????WEBP"
|
||||
var webpSignature = [4]byte{'W', 'E', 'B', 'P'}
|
||||
|
||||
@@ -46,7 +44,7 @@ func Load(r io.Reader) (md *meta.Data, imgStream io.Reader, err error) {
|
||||
|
||||
// Same as Load() except that no new stream is provided
|
||||
func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
md = &meta.Data{Format: Format}
|
||||
md = &meta.Data{Format: types.WEBP}
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -54,8 +52,10 @@ func ExtractMetadata(r io.Reader) (md *meta.Data, err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
if err := verifySignature(r); err != nil {
|
||||
if is_webp, err := verifySignature(r); err != nil {
|
||||
return nil, err
|
||||
} else if !is_webp {
|
||||
return nil, nil
|
||||
}
|
||||
format, chunkLen, err := readWebPFormat(r)
|
||||
if err != nil {
|
||||
@@ -131,15 +131,25 @@ func parseWebpExtended(r io.Reader, md *meta.Data, chunkLen uint32) error {
|
||||
return err
|
||||
}
|
||||
hasProfile := h[0]&(1<<5) != 0
|
||||
hasExif := h[0]&(1<<3) != 0
|
||||
animated := h[0]&(1<<1) != 0
|
||||
h = h[4:]
|
||||
w := uint32(h[0]) | uint32(h[1])<<8 | uint32(h[2])<<16
|
||||
ht := uint32(h[3]) | uint32(h[4])<<8 | uint32(h[5])<<16
|
||||
md.PixelWidth = w + 1
|
||||
md.PixelHeight = ht + 1
|
||||
md.BitsPerComponent = bitsPerComponent
|
||||
md.HasFrames = animated
|
||||
if !hasProfile && !hasExif {
|
||||
return nil
|
||||
}
|
||||
if err := skip(r, chunkLen-10); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if hasProfile {
|
||||
data, err := readICCP(r, chunkLen)
|
||||
// ICCP must be next
|
||||
data, err := readICCP(r)
|
||||
if err != nil {
|
||||
md.SetICCProfileError(err)
|
||||
} else {
|
||||
@@ -147,15 +157,35 @@ func parseWebpExtended(r io.Reader, md *meta.Data, chunkLen uint32) error {
|
||||
}
|
||||
}
|
||||
|
||||
if hasExif {
|
||||
for {
|
||||
ch, err := readChunkHeader(r)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
if ch.ChunkType == chunkTypeEXIF {
|
||||
data := make([]byte, ch.Length)
|
||||
if _, err := io.ReadFull(r, data); err != nil {
|
||||
return err
|
||||
}
|
||||
md.SetExifData(data)
|
||||
break
|
||||
} else {
|
||||
if err = skip(r, ch.Length); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
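
parseWebpExtended reads the VP8X feature flags and the 24-bit little-endian canvas size, which is stored minus one. A standalone sketch of just that header decode, independent of the package's reader helpers:

package main

import "fmt"

// decodeVP8X extracts the flags and canvas size from the first 10 bytes of a
// VP8X chunk payload, mirroring the bit tests in parseWebpExtended.
func decodeVP8X(h []byte) (hasICC, hasEXIF, animated bool, w, ht uint32) {
	hasICC = h[0]&(1<<5) != 0
	hasEXIF = h[0]&(1<<3) != 0
	animated = h[0]&(1<<1) != 0
	d := h[4:]
	w = uint32(d[0]) | uint32(d[1])<<8 | uint32(d[2])<<16
	ht = uint32(d[3]) | uint32(d[4])<<8 | uint32(d[5])<<16
	return hasICC, hasEXIF, animated, w + 1, ht + 1
}

func main() {
	// 0x2a sets the ICC, EXIF and animation bits; 799/599 encode an 800x600 canvas.
	payload := []byte{0x2a, 0, 0, 0, 0x1f, 0x03, 0x00, 0x57, 0x02, 0x00}
	icc, exif, anim, w, h := decodeVP8X(payload)
	fmt.Println(icc, exif, anim, w, h) // true true true 800 600
}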
|
||||
|
||||
func readICCP(r io.Reader, chunkLen uint32) ([]byte, error) {
|
||||
// Skip to the end of the chunk.
|
||||
if err := skip(r, chunkLen-10); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func readICCP(r io.Reader) ([]byte, error) {
|
||||
// ICCP _must_ be the next chunk.
|
||||
ch, err := readChunkHeader(r)
|
||||
if err != nil {
|
||||
@@ -173,22 +203,22 @@ func readICCP(r io.Reader, chunkLen uint32) ([]byte, error) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func verifySignature(r io.Reader) error {
|
||||
func verifySignature(r io.Reader) (bool, error) {
|
||||
ch, err := readChunkHeader(r)
|
||||
if err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
if ch.ChunkType != chunkTypeRIFF {
|
||||
return errors.New("missing RIFF header")
|
||||
return false, nil
|
||||
}
|
||||
var fourcc [4]byte
|
||||
if _, err := io.ReadFull(r, fourcc[:]); err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
if fourcc != webpSignature {
|
||||
return errors.New("not a WEBP file")
|
||||
return false, nil
|
||||
}
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func readWebPFormat(r io.Reader) (format webpFormat, length uint32, err error) {
|
||||
|
||||
5
vendor/github.com/kovidgoyal/imaging/publish.py
generated
vendored
@@ -5,7 +5,7 @@ import os
|
||||
import subprocess
|
||||
|
||||
|
||||
VERSION = "1.7.2"
|
||||
VERSION = "1.8.17"
|
||||
|
||||
|
||||
def run(*args: str):
|
||||
@@ -22,7 +22,8 @@ def main():
|
||||
ans = 'n'
|
||||
if ans.lower() != 'y':
|
||||
return
|
||||
os.environ['GITHUB_TOKEN'] = open(os.path.join(os.environ['PENV'], 'github-token')).read().strip().partition(':')[2]
|
||||
os.environ['GITHUB_TOKEN'] = open(os.path.join(
|
||||
os.environ['PENV'], 'github-token')).read().strip().partition(':')[2]
|
||||
run('git', 'tag', '-a', 'v' + version, '-m', f'version {version}')
|
||||
run('git', 'push')
|
||||
run('goreleaser', 'release', '--clean')
|
||||
|
||||
231
vendor/github.com/kovidgoyal/imaging/resize.go
generated
vendored
@@ -3,6 +3,9 @@ package imaging
|
||||
import (
|
||||
"image"
|
||||
"math"
|
||||
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/nrgba"
|
||||
)
|
||||
|
||||
type indexWeight struct {
|
||||
@@ -12,26 +15,17 @@ type indexWeight struct {
|
||||
|
||||
func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWeight {
|
||||
du := float64(srcSize) / float64(dstSize)
|
||||
scale := du
|
||||
if scale < 1.0 {
|
||||
scale = 1.0
|
||||
}
|
||||
scale := max(1.0, du)
|
||||
ru := math.Ceil(scale * filter.Support)
|
||||
|
||||
out := make([][]indexWeight, dstSize)
|
||||
tmp := make([]indexWeight, 0, dstSize*int(ru+2)*2)
|
||||
|
||||
for v := 0; v < dstSize; v++ {
|
||||
for v := range dstSize {
|
||||
fu := (float64(v)+0.5)*du - 0.5
|
||||
|
||||
begin := int(math.Ceil(fu - ru))
|
||||
if begin < 0 {
|
||||
begin = 0
|
||||
}
|
||||
end := int(math.Floor(fu + ru))
|
||||
if end > srcSize-1 {
|
||||
end = srcSize - 1
|
||||
}
|
||||
begin := max(0, int(math.Ceil(fu-ru)))
|
||||
end := min(int(math.Floor(fu+ru)), srcSize-1)
|
||||
|
||||
var sum float64
|
||||
for u := begin; u <= end; u++ {
|
||||
@@ -56,23 +50,27 @@ func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWei
|
||||
|
||||
// Resize resizes the image to the specified width and height using the specified resampling
|
||||
// filter and returns the transformed image. If one of width or height is 0, the image aspect
|
||||
// ratio is preserved.
|
||||
// ratio is preserved. When is_opaque is true, returns an nrgb.Image, otherwise
// an image.NRGBA. When the image size is unchanged, returns a clone with the
// same image type.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos)
|
||||
func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
|
||||
func ResizeWithOpacity(img image.Image, width, height int, filter ResampleFilter, is_opaque bool) image.Image {
|
||||
dstW, dstH := width, height
|
||||
if dstW < 0 || dstH < 0 {
|
||||
if dstW < 0 || dstH < 0 || (dstW == 0 && dstH == 0) {
|
||||
if is_opaque {
|
||||
return &NRGB{}
|
||||
}
|
||||
return &image.NRGBA{}
|
||||
}
|
||||
if dstW == 0 && dstH == 0 {
|
||||
return &image.NRGBA{}
|
||||
}
|
||||
|
||||
srcW := img.Bounds().Dx()
|
||||
srcH := img.Bounds().Dy()
|
||||
if srcW <= 0 || srcH <= 0 {
|
||||
if is_opaque {
|
||||
return &NRGB{}
|
||||
}
|
||||
return &image.NRGBA{}
|
||||
}
|
||||
|
||||
@@ -87,32 +85,84 @@ func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NR
|
||||
}
|
||||
|
||||
if srcW == dstW && srcH == dstH {
|
||||
return Clone(img)
|
||||
return ClonePreservingType(img)
|
||||
}
|
||||
|
||||
if filter.Support <= 0 {
|
||||
// Nearest-neighbor special case.
|
||||
return resizeNearest(img, dstW, dstH)
|
||||
if is_opaque {
|
||||
return resizeNearest(img, dstW, dstH)
|
||||
}
|
||||
return resizeNearestWithAlpha(img, dstW, dstH)
|
||||
}
|
||||
|
||||
hr := func(img image.Image, dim int) image.Image {
|
||||
if is_opaque {
|
||||
return resizeHorizontal(img, dim, filter)
|
||||
}
|
||||
return resizeHorizontalWithAlpha(img, dim, filter)
|
||||
}
|
||||
vr := func(img image.Image, dim int) image.Image {
|
||||
if is_opaque {
|
||||
return resizeVertical(img, dim, filter)
|
||||
}
|
||||
return resizeVerticalWithAlpha(img, dim, filter)
|
||||
}
|
||||
|
||||
if srcW != dstW && srcH != dstH {
|
||||
return resizeVertical(resizeHorizontal(img, dstW, filter), dstH, filter)
|
||||
return vr(hr(img, dstW), dstH)
|
||||
}
|
||||
if srcW != dstW {
|
||||
return resizeHorizontal(img, dstW, filter)
|
||||
return hr(img, dstW)
|
||||
}
|
||||
return resizeVertical(img, dstH, filter)
|
||||
|
||||
return vr(img, dstH)
|
||||
}
|
||||
|
||||
func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, width, src.h))
|
||||
weights := precomputeWeights(width, src.w, filter)
|
||||
func Resize(img image.Image, width, height int, filter ResampleFilter) image.Image {
|
||||
return ResizeWithOpacity(img, width, height, filter, false)
|
||||
}
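
A short usage sketch of the new entry points: ResizeWithOpacity takes the 3-byte-per-pixel nrgb path when the caller promises an opaque source, while the Resize wrapper keeps the alpha-carrying behaviour:

package main

import (
	"fmt"
	"image"

	"github.com/kovidgoyal/imaging"
)

func main() {
	src := image.NewNRGBA(image.Rect(0, 0, 400, 300))
	// Opaque path: alpha is ignored and the result is an *imaging.NRGB.
	opaque := imaging.ResizeWithOpacity(src, 200, 0, imaging.Lanczos, true)
	// Default path: alpha is resampled along with the colour channels.
	withAlpha := imaging.Resize(src, 200, 0, imaging.Lanczos)
	fmt.Printf("%T %T\n", opaque, withAlpha)
}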
|
||||
|
||||
func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *nrgb.Image {
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgb.NewNRGBScanner(img, nrgb.Color{})
|
||||
dst := nrgb.NewNRGB(image.Rect(0, 0, width, h).Add(img.Bounds().Min))
|
||||
weights := precomputeWeights(width, w, filter)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, src.w*4)
|
||||
scanLine := make([]uint8, w*3)
|
||||
for y := start; y < limit; y++ {
|
||||
src.Scan(0, y, src.w, y+1, scanLine)
|
||||
src.Scan(0, y, w, y+1, scanLine)
|
||||
j0 := y * dst.Stride
|
||||
for x := range weights {
|
||||
var r, g, b float64
|
||||
for _, w := range weights[x] {
|
||||
i := w.index * 3
|
||||
s := scanLine[i : i+3 : i+3]
|
||||
r += float64(s[0]) * w.weight
|
||||
g += float64(s[1]) * w.weight
|
||||
b += float64(s[2]) * w.weight
|
||||
}
|
||||
j := j0 + x*3
|
||||
d := dst.Pix[j : j+3 : j+3]
|
||||
d[0] = clamp(r)
|
||||
d[1] = clamp(g)
|
||||
d[2] = clamp(b)
|
||||
}
|
||||
}
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func resizeHorizontalWithAlpha(img image.Image, width int, filter ResampleFilter) *image.NRGBA {
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, width, h).Add(img.Bounds().Min))
|
||||
weights := precomputeWeights(width, w, filter)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, w*4)
|
||||
for y := start; y < limit; y++ {
|
||||
src.Scan(0, y, w, y+1, scanLine)
|
||||
j0 := y * dst.Stride
|
||||
for x := range weights {
|
||||
var r, g, b, a float64
|
||||
@@ -136,20 +186,52 @@ func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *image.
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func resizeVertical(img image.Image, height int, filter ResampleFilter) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, height))
|
||||
weights := precomputeWeights(height, src.h, filter)
|
||||
func resizeVertical(img image.Image, height int, filter ResampleFilter) *nrgb.Image {
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgb.NewNRGBScanner(img, nrgb.Color{})
|
||||
dst := nrgb.NewNRGB(image.Rect(0, 0, w, height).Add(img.Bounds().Min))
|
||||
weights := precomputeWeights(height, h, filter)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, src.h*4)
|
||||
scanLine := make([]uint8, h*3)
|
||||
for x := start; x < limit; x++ {
|
||||
src.Scan(x, 0, x+1, src.h, scanLine)
|
||||
src.Scan(x, 0, x+1, h, scanLine)
|
||||
for y := range weights {
|
||||
var r, g, b float64
|
||||
for _, w := range weights[y] {
|
||||
i := w.index * 3
|
||||
s := scanLine[i : i+3 : i+3]
|
||||
r += float64(s[0]) * w.weight
|
||||
g += float64(s[1]) * w.weight
|
||||
b += float64(s[2]) * w.weight
|
||||
}
|
||||
j := y*dst.Stride + x*3
|
||||
d := dst.Pix[j : j+3 : j+3]
|
||||
d[0] = clamp(r)
|
||||
d[1] = clamp(g)
|
||||
d[2] = clamp(b)
|
||||
}
|
||||
}
|
||||
}, 0, w); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func resizeVerticalWithAlpha(img image.Image, height int, filter ResampleFilter) *image.NRGBA {
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, height).Add(img.Bounds().Min))
|
||||
weights := precomputeWeights(height, h, filter)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, h*4)
|
||||
for x := start; x < limit; x++ {
|
||||
src.Scan(x, 0, x+1, h, scanLine)
|
||||
for y := range weights {
|
||||
var r, g, b, a float64
|
||||
for _, w := range weights[y] {
|
||||
@@ -172,20 +254,20 @@ func resizeVertical(img image.Image, height int, filter ResampleFilter) *image.N
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 0, src.w); err != nil {
|
||||
}, 0, w); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// resizeNearest is a fast nearest-neighbor resize, no filtering.
|
||||
func resizeNearest(img image.Image, width, height int) *image.NRGBA {
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, width, height))
|
||||
func resizeNearestWithAlpha(img image.Image, width, height int) *image.NRGBA {
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, width, height).Add(img.Bounds().Min))
|
||||
dx := float64(img.Bounds().Dx()) / float64(width)
|
||||
dy := float64(img.Bounds().Dy()) / float64(height)
|
||||
|
||||
if dx > 1 && dy > 1 {
|
||||
src := newScanner(img)
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
srcY := int((float64(y) + 0.5) * dy)
|
||||
@@ -220,17 +302,58 @@ func resizeNearest(img image.Image, width, height int) *image.NRGBA {
|
||||
return dst
|
||||
}
|
||||
|
||||
func resizeNearest(img image.Image, width, height int) *nrgb.Image {
|
||||
dst := nrgb.NewNRGB(image.Rect(0, 0, width, height).Add(img.Bounds().Min))
|
||||
dx := float64(img.Bounds().Dx()) / float64(width)
|
||||
dy := float64(img.Bounds().Dy()) / float64(height)
|
||||
|
||||
if dx > 1 && dy > 1 {
|
||||
src := nrgb.NewNRGBScanner(img, nrgb.Color{})
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
srcY := int((float64(y) + 0.5) * dy)
|
||||
dstOff := y * dst.Stride
|
||||
for x := range width {
|
||||
srcX := int((float64(x) + 0.5) * dx)
|
||||
src.Scan(srcX, srcY, srcX+1, srcY+1, dst.Pix[dstOff:dstOff+3])
|
||||
dstOff += 3
|
||||
}
|
||||
}
|
||||
}, 0, height); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
src := AsNRGB(img)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
srcY := int((float64(y) + 0.5) * dy)
|
||||
srcOff0 := srcY * src.Stride
|
||||
dstOff := y * dst.Stride
|
||||
for x := range width {
|
||||
srcX := int((float64(x) + 0.5) * dx)
|
||||
srcOff := srcOff0 + srcX*3
|
||||
copy(dst.Pix[dstOff:dstOff+3], src.Pix[srcOff:srcOff+3])
|
||||
dstOff += 3
|
||||
}
|
||||
}
|
||||
}, 0, height); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Fit scales down the image using the specified resample filter to fit the specified
|
||||
// maximum width and height and returns the transformed image.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
|
||||
func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
|
||||
func Fit(img image.Image, width, height int, filter ResampleFilter) image.Image {
|
||||
maxW, maxH := width, height
|
||||
|
||||
if maxW <= 0 || maxH <= 0 {
|
||||
return &image.NRGBA{}
|
||||
return &NRGB{}
|
||||
}
|
||||
|
||||
srcBounds := img.Bounds()
|
||||
@@ -238,11 +361,11 @@ func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA
|
||||
srcH := srcBounds.Dy()
|
||||
|
||||
if srcW <= 0 || srcH <= 0 {
|
||||
return &image.NRGBA{}
|
||||
return &NRGB{}
|
||||
}
|
||||
|
||||
if srcW <= maxW && srcH <= maxH {
|
||||
return Clone(img)
|
||||
return ClonePreservingType(img)
|
||||
}
|
||||
|
||||
srcAspectRatio := float64(srcW) / float64(srcH)
|
||||
@@ -266,11 +389,11 @@ func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA
|
||||
// Example:
|
||||
//
|
||||
// dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos)
|
||||
func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
|
||||
func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) image.Image {
|
||||
dstW, dstH := width, height
|
||||
|
||||
if dstW <= 0 || dstH <= 0 {
|
||||
return &image.NRGBA{}
|
||||
return &NRGB{}
|
||||
}
|
||||
|
||||
srcBounds := img.Bounds()
|
||||
@@ -278,11 +401,11 @@ func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilt
|
||||
srcH := srcBounds.Dy()
|
||||
|
||||
if srcW <= 0 || srcH <= 0 {
|
||||
return &image.NRGBA{}
|
||||
return &NRGB{}
|
||||
}
|
||||
|
||||
if srcW == dstW && srcH == dstH {
|
||||
return Clone(img)
|
||||
return ClonePreservingType(img)
|
||||
}
|
||||
|
||||
if srcW >= 100 && srcH >= 100 {
|
||||
@@ -295,7 +418,7 @@ func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilt
|
||||
// the given anchor point, then scales it to the specified dimensions and returns the transformed image.
|
||||
//
|
||||
// This is generally faster than resizing first, but may result in inaccuracies when used on small source images.
|
||||
func cropAndResize(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
|
||||
func cropAndResize(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) image.Image {
|
||||
dstW, dstH := width, height
|
||||
|
||||
srcBounds := img.Bounds()
|
||||
@@ -328,7 +451,7 @@ func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter Res
|
||||
srcAspectRatio := float64(srcW) / float64(srcH)
|
||||
dstAspectRatio := float64(dstW) / float64(dstH)
|
||||
|
||||
var tmp *image.NRGBA
|
||||
var tmp image.Image
|
||||
if srcAspectRatio < dstAspectRatio {
|
||||
tmp = Resize(img, dstW, 0, filter)
|
||||
} else {
|
||||
@@ -344,7 +467,7 @@ func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter Res
|
||||
// Example:
|
||||
//
|
||||
// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos)
|
||||
func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
|
||||
func Thumbnail(img image.Image, width, height int, filter ResampleFilter) image.Image {
|
||||
return Fill(img, width, height, Center, filter)
|
||||
}
|
||||
|
||||
|
||||
14
vendor/github.com/kovidgoyal/imaging/session.vim
generated
vendored
@@ -1,2 +1,14 @@
|
||||
" Empty for the moment
|
||||
|
||||
lua << EOF
|
||||
local capabilities = require("cmp_nvim_lsp").default_capabilities()
|
||||
local lspconfig = require('lspconfig')
|
||||
lspconfig.gopls.setup({
|
||||
capabilities = capabilities,
|
||||
settings = {
|
||||
gopls = {
|
||||
buildFlags = { "-tags=lcms2cgo" },
|
||||
directoryFilters = { "-.git", "-bypy/b", "-build", "-dist", }
|
||||
}
|
||||
}
|
||||
})
|
||||
EOF
|
||||
|
||||
8
vendor/github.com/kovidgoyal/imaging/to-frames
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
d = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cmd', 'frames')
|
||||
os.execlp('go', 'go', 'run', d, *sys.argv[1:])
|
||||
8
vendor/github.com/kovidgoyal/imaging/to-png
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
d = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cmd', 'demo')
|
||||
os.execlp('go', 'go', 'run', d, *sys.argv[1:])
|
||||
274
vendor/github.com/kovidgoyal/imaging/tools.go
generated
vendored
@@ -2,11 +2,19 @@ package imaging
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"image"
|
||||
"image/color"
|
||||
"image/draw"
|
||||
"math"
|
||||
"slices"
|
||||
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/nrgba"
|
||||
)
|
||||
|
||||
var _ = fmt.Println
|
||||
|
||||
// New creates a new image with the specified width and height, and fills it with the specified color.
|
||||
func New(width, height int, fillColor color.Color) *image.NRGBA {
|
||||
if width <= 0 || height <= 0 {
|
||||
@@ -27,20 +35,219 @@ func New(width, height int, fillColor color.Color) *image.NRGBA {
|
||||
|
||||
// Clone returns a copy of the given image.
|
||||
func Clone(img image.Image) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
|
||||
size := src.w * 4
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
|
||||
size := w * 4
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
i := y * dst.Stride
|
||||
src.Scan(0, y, src.w, y+1, dst.Pix[i:i+size])
|
||||
src.Scan(0, y, w, y+1, dst.Pix[i:i+size])
|
||||
}
|
||||
}, 0, src.h); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func ClonePreservingOrigin(img image.Image) *image.NRGBA {
|
||||
w, h := img.Bounds().Dx(), img.Bounds().Dy()
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(img.Bounds())
|
||||
size := w * 4
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
i := y * dst.Stride
|
||||
src.Scan(0, y, w, y+1, dst.Pix[i:i+size])
|
||||
}
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func AsNRGBA(src image.Image) *image.NRGBA {
|
||||
if nrgba, ok := src.(*image.NRGBA); ok {
|
||||
return nrgba
|
||||
}
|
||||
return ClonePreservingOrigin(src)
|
||||
}
|
||||
|
||||
func AsNRGB(src image.Image) *NRGB {
|
||||
if nrgb, ok := src.(*NRGB); ok {
|
||||
return nrgb
|
||||
}
|
||||
sc := nrgb.NewNRGBScanner(src, nrgb.Color{})
|
||||
dst := sc.NewImage(src.Bounds()).(*nrgb.Image)
|
||||
w, h := src.Bounds().Dx(), src.Bounds().Dy()
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
sc.ScanRow(0, y, w, y+1, dst, y)
|
||||
}
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Clone an image preserving its type for all known image types or returning an NRGBA64 image otherwise
|
||||
func ClonePreservingType(src image.Image) image.Image {
|
||||
switch src := src.(type) {
|
||||
case *image.RGBA:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.RGBA64:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.NRGBA:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *NRGB:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.NRGBA64:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.Gray:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.Gray16:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.Alpha:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.Alpha16:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.CMYK:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
return &dst
|
||||
case *image.Paletted:
|
||||
dst := *src
|
||||
dst.Pix = slices.Clone(src.Pix)
|
||||
dst.Palette = slices.Clone(src.Palette)
|
||||
return &dst
|
||||
case *image.YCbCr:
|
||||
dst := *src
|
||||
dst.Y = slices.Clone(src.Y)
|
||||
dst.Cb = slices.Clone(src.Cb)
|
||||
dst.Cr = slices.Clone(src.Cr)
|
||||
return &dst
|
||||
case *image.NYCbCrA:
|
||||
dst := *src
|
||||
dst.Y = slices.Clone(src.Y)
|
||||
dst.Cb = slices.Clone(src.Cb)
|
||||
dst.Cr = slices.Clone(src.Cr)
|
||||
dst.A = slices.Clone(src.A)
|
||||
return &dst
|
||||
// For any other image type, fall back to a generic copy.
|
||||
// This creates an NRGBA image, which may not be the original type,
|
||||
// but ensures the image data is preserved.
|
||||
default:
|
||||
b := src.Bounds()
|
||||
dst := image.NewNRGBA64(b)
|
||||
draw.Draw(dst, b, src, b.Min, draw.Src)
|
||||
return dst
|
||||
}
|
||||
}
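
ClonePreservingType deep-copies the pixel buffers for every concrete type it knows about and only falls back to an NRGBA64 copy for anything else; a quick check of that behaviour:

package main

import (
	"fmt"
	"image"

	"github.com/kovidgoyal/imaging"
)

func main() {
	src := image.NewGray16(image.Rect(0, 0, 4, 4))
	dst := imaging.ClonePreservingType(src)
	fmt.Printf("%T\n", dst) // *image.Gray16: the concrete type survives the clone
}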
|
||||
|
||||
// Ensure image has origin at (0, 0). Note that this destroys the original
|
||||
// image and returns a new image with the same data, but origin shifted.
|
||||
func NormalizeOrigin(src image.Image) image.Image {
|
||||
r := src.Bounds()
|
||||
if r.Min.X == 0 && r.Min.Y == 0 {
|
||||
return src
|
||||
}
|
||||
r = image.Rect(0, 0, r.Dx(), r.Dy())
|
||||
switch src := src.(type) {
|
||||
case *image.RGBA:
|
||||
dst := *src
|
||||
*src = image.RGBA{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.RGBA64:
|
||||
dst := *src
|
||||
*src = image.RGBA64{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.NRGBA:
|
||||
dst := *src
|
||||
*src = image.NRGBA{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *NRGB:
|
||||
dst := *src
|
||||
*src = NRGB{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.NRGBA64:
|
||||
dst := *src
|
||||
*src = image.NRGBA64{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.Gray:
|
||||
dst := *src
|
||||
*src = image.Gray{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.Gray16:
|
||||
dst := *src
|
||||
*src = image.Gray16{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.Alpha:
|
||||
dst := *src
|
||||
*src = image.Alpha{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.Alpha16:
|
||||
dst := *src
|
||||
*src = image.Alpha16{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.CMYK:
|
||||
dst := *src
|
||||
*src = image.CMYK{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.Paletted:
|
||||
dst := *src
|
||||
*src = image.Paletted{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.YCbCr:
|
||||
dst := *src
|
||||
*src = image.YCbCr{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
case *image.NYCbCrA:
|
||||
dst := *src
|
||||
*src = image.NYCbCrA{}
|
||||
dst.Rect = r
|
||||
return &dst
|
||||
// For any other image type, fall back to a generic copy.
|
||||
// This creates an NRGBA image, which may not be the original type,
|
||||
// but ensures the image data is preserved.
|
||||
default:
|
||||
b := src.Bounds()
|
||||
dst := image.NewNRGBA64(b)
|
||||
draw.Draw(dst, b, src, b.Min, draw.Src)
|
||||
dst.Rect = r
|
||||
return dst
|
||||
}
|
||||
}
|
||||
|
||||
// Anchor is the anchor point for image alignment.
|
||||
type Anchor int
|
||||
|
||||
@@ -102,7 +309,7 @@ func Crop(img image.Image, rect image.Rectangle) *image.NRGBA {
|
||||
return Clone(img)
|
||||
}
|
||||
|
||||
src := newScanner(img)
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, r.Dx(), r.Dy()))
|
||||
rowSize := r.Dx() * 4
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
@@ -145,7 +352,7 @@ func Paste(background, img image.Image, pos image.Point) *image.NRGBA {
|
||||
return Clone(img)
|
||||
}
|
||||
|
||||
src := newScanner(img)
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
x1 := interRect.Min.X - pasteRect.Min.X
|
||||
@@ -199,7 +406,7 @@ func Overlay(background, img image.Image, pos image.Point, opacity float64) *ima
|
||||
if interRect.Empty() {
|
||||
return dst
|
||||
}
|
||||
src := newScanner(img)
|
||||
src := nrgba.NewNRGBAScanner(img)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
scanLine := make([]uint8, interRect.Dx()*4)
|
||||
for y := start; y < limit; y++ {
|
||||
@@ -262,3 +469,54 @@ func OverlayCenter(background, img image.Image, opacity float64) *image.NRGBA {
|
||||
|
||||
return Overlay(background, img, image.Point{x0, y0}, opacity)
|
||||
}
|
||||
|
||||
// Paste the image onto the specified background color.
func PasteOntoBackground(img image.Image, bg color.Color) image.Image {
if IsOpaque(img) {
return img
}
_, _, _, a := bg.RGBA()
bg_is_opaque := a == 0xffff
var base draw.Image
if bg_is_opaque {
// use premult as it's faster and will be converted to NRGB anyway
base = image.NewRGBA(img.Bounds())
} else {
base = image.NewNRGBA(img.Bounds())
}
bgi := image.NewUniform(bg)
draw.Draw(base, base.Bounds(), bgi, image.Point{}, draw.Src)
draw.Draw(base, base.Bounds(), img, img.Bounds().Min, draw.Over)
if bg_is_opaque {
return AsNRGB(base)
}
return base
}
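
PasteOntoBackground flattens any translucency onto a solid colour, going through a premultiplied RGBA scratch image and returning an NRGB result when the background is fully opaque; for example, compositing onto white before encoding to a format without alpha:

package main

import (
	"fmt"
	"image"
	"image/color"

	"github.com/kovidgoyal/imaging"
)

func main() {
	src := image.NewNRGBA(image.Rect(0, 0, 8, 8)) // starts fully transparent
	flat := imaging.PasteOntoBackground(src, color.White)
	fmt.Printf("%T\n", flat) // expected to be an *imaging.NRGB since white is opaque
}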
// Return contiguous non-premultiplied RGB pixel data for this image with 8 bits per channel
|
||||
func AsRGBData8(img image.Image) (pix []uint8) {
|
||||
b := img.Bounds()
|
||||
n := AsNRGB(img)
|
||||
if n.Stride == b.Dx()*3 {
|
||||
return n.Pix
|
||||
}
|
||||
pix = make([]uint8, 0, b.Dx()*b.Dy()*3)
|
||||
for y := range b.Dy() {
|
||||
pix = append(pix, n.Pix[y*n.Stride:y*n.Stride+b.Dx()*3]...)
|
||||
}
|
||||
return pix
|
||||
}
|
||||
|
||||
// Return contiguous non-premultiplied RGBA pixel data for this image with 8 bits per channel
|
||||
func AsRGBAData8(img image.Image) (pix []uint8) {
|
||||
b := img.Bounds()
|
||||
n := AsNRGBA(img)
|
||||
if n.Stride == b.Dx()*4 {
|
||||
return n.Pix
|
||||
}
|
||||
pix = make([]uint8, 0, b.Dx()*b.Dy()*4)
|
||||
for y := range b.Dy() {
|
||||
pix = append(pix, n.Pix[y*n.Stride:y*n.Stride+b.Dx()*4]...)
|
||||
}
|
||||
return pix
|
||||
}
|
||||
|
||||
201
vendor/github.com/kovidgoyal/imaging/transform.go
generated
vendored
@@ -1,157 +1,174 @@
|
||||
package imaging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"image"
|
||||
"image/color"
|
||||
"math"
|
||||
|
||||
"github.com/kovidgoyal/imaging/nrgb"
|
||||
"github.com/kovidgoyal/imaging/nrgba"
|
||||
"github.com/kovidgoyal/imaging/types"
|
||||
)
|
||||
|
||||
// FlipH flips the image horizontally (from left to right) and returns the transformed image.
|
||||
func FlipH(img image.Image) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dstW := src.w
|
||||
dstH := src.h
|
||||
rowSize := dstW * 4
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for dstY := start; dstY < limit; dstY++ {
|
||||
i := dstY * dst.Stride
|
||||
srcY := dstY
|
||||
src.Scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
|
||||
reverse(dst.Pix[i : i+rowSize])
|
||||
var _ = fmt.Println
|
||||
|
||||
type Scanner = types.Scanner
|
||||
type NRGB = nrgb.Image
|
||||
type NRGBColor = nrgb.Color
|
||||
|
||||
func ScannerForImage(img image.Image) Scanner {
|
||||
switch img := img.(type) {
|
||||
case *NRGB, *image.CMYK, *image.YCbCr, *image.Gray:
|
||||
return nrgb.NewNRGBScanner(img, NRGBColor{})
|
||||
case *image.Paletted:
|
||||
for _, x := range img.Palette {
|
||||
_, _, _, a := x.RGBA()
|
||||
if a < 0xffff {
|
||||
return nrgba.NewNRGBAScanner(img)
|
||||
}
|
||||
}
|
||||
}, 0, dstH); err != nil {
|
||||
return nrgb.NewNRGBScanner(img, NRGBColor{})
|
||||
}
|
||||
return nrgba.NewNRGBAScanner(img)
|
||||
}
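
ScannerForImage picks the 3-channel NRGB scanner for source types that cannot carry alpha (and for palettes whose entries are all opaque), and the NRGBA scanner otherwise; the transforms below then allocate their destination through the scanner, so the output type follows the input. For instance:

package main

import (
	"fmt"
	"image"

	"github.com/kovidgoyal/imaging"
)

func main() {
	src := image.NewYCbCr(image.Rect(0, 0, 16, 16), image.YCbCrSubsampleRatio420)
	// YCbCr has no alpha, so the flip goes through the NRGB scanner and the
	// result is expected to be an *imaging.NRGB rather than an *image.NRGBA.
	flipped := imaging.FlipH(src)
	fmt.Printf("%T\n", flipped)
}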
|
||||
|
||||
// FlipH flips the image horizontally (from left to right) and returns the transformed image.
|
||||
func FlipH(img image.Image) (ans image.Image) {
|
||||
sc := ScannerForImage(img)
|
||||
b := img.Bounds()
|
||||
w, h := b.Dx(), b.Dy()
|
||||
ans = sc.NewImage(b)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for y := start; y < limit; y++ {
|
||||
sc.ScanRow(0, y, w, y+1, ans, y)
|
||||
sc.ReverseRow(ans, y)
|
||||
}
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
return
|
||||
}
|
||||
|
||||
// FlipV flips the image vertically (from top to bottom) and returns the transformed image.
|
||||
func FlipV(img image.Image) *image.NRGBA {
|
||||
src := newScanner(img)
|
||||
dstW := src.w
|
||||
dstH := src.h
|
||||
rowSize := dstW * 4
|
||||
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
|
||||
func FlipV(img image.Image) (ans image.Image) {
|
||||
sc := ScannerForImage(img)
|
||||
b := img.Bounds()
|
||||
w, h := b.Dx(), b.Dy()
|
||||
ans = sc.NewImage(b)
|
||||
if err := run_in_parallel_over_range(0, func(start, limit int) {
|
||||
for dstY := start; dstY < limit; dstY++ {
|
||||
i := dstY * dst.Stride
|
||||
srcY := dstH - dstY - 1
|
||||
src.Scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
|
||||
for y := start; y < limit; y++ {
|
||||
srcY := h - y - 1
|
||||
sc.ScanRow(0, srcY, w, srcY+1, ans, y)
|
||||
}
|
||||
}, 0, dstH); err != nil {
|
||||
}, 0, h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dst
|
||||
return
|
||||
}
|
||||
|
||||
func swap_width_height(r image.Rectangle) image.Rectangle {
|
||||
return image.Rectangle{r.Min, image.Point{r.Min.X + r.Dy(), r.Min.Y + r.Dx()}}
|
||||
}

// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise.
func Transpose(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
func Transpose(img image.Image) (ans image.Image) {
sc := ScannerForImage(img)
b := img.Bounds()
w, h := b.Dx(), b.Dy()
ans = sc.NewImage(swap_width_height(b))
if err := run_in_parallel_over_range(0, func(start, limit int) {
for dstY := start; dstY < limit; dstY++ {
i := dstY * dst.Stride
srcX := dstY
src.Scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
for y := start; y < limit; y++ {
// scan yth column from src into yth row in dest
sc.ScanRow(y, 0, y+1, h, ans, y)
}
}, 0, dstH); err != nil {
}, 0, w); err != nil {
panic(err)
}
return dst
return
}

// Transverse flips the image vertically and rotates 90 degrees counter-clockwise.
func Transverse(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
func Transverse(img image.Image) (ans image.Image) {
sc := ScannerForImage(img)
b := img.Bounds()
w, h := b.Dx(), b.Dy()
ans = sc.NewImage(swap_width_height(b))
if err := run_in_parallel_over_range(0, func(start, limit int) {
for dstY := start; dstY < limit; dstY++ {
i := dstY * dst.Stride
srcX := dstH - dstY - 1
src.Scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
for y := start; y < limit; y++ {
// scan width-yth column from src into yth row in dest
x := w - y - 1
sc.ScanRow(x, 0, x+1, h, ans, y)
sc.ReverseRow(ans, y)
}
}, 0, dstH); err != nil {
}, 0, w); err != nil {
panic(err)
}
return dst
return
}

// Rotate90 rotates the image 90 degrees counter-clockwise and returns the transformed image.
func Rotate90(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
func Rotate90(img image.Image) (ans image.Image) {
sc := ScannerForImage(img)
b := img.Bounds()
w, h := b.Dx(), b.Dy()
ans = sc.NewImage(swap_width_height(b))
if err := run_in_parallel_over_range(0, func(start, limit int) {
for dstY := start; dstY < limit; dstY++ {
i := dstY * dst.Stride
srcX := dstH - dstY - 1
src.Scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
for y := start; y < limit; y++ {
// scan width-yth column from src into yth row in dest
x := w - y - 1
sc.ScanRow(x, 0, x+1, h, ans, y)
}
}, 0, dstH); err != nil {
}, 0, w); err != nil {
panic(err)
}
return dst
return
}

// Rotate180 rotates the image 180 degrees counter-clockwise and returns the transformed image.
func Rotate180(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.w
dstH := src.h
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
func Rotate180(img image.Image) (ans image.Image) {
sc := ScannerForImage(img)
b := img.Bounds()
w, h := b.Dx(), b.Dy()
ans = sc.NewImage(b)
if err := run_in_parallel_over_range(0, func(start, limit int) {
for dstY := start; dstY < limit; dstY++ {
i := dstY * dst.Stride
srcY := dstH - dstY - 1
src.Scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
for y := start; y < limit; y++ {
srcY := h - y - 1
sc.ScanRow(0, srcY, w, srcY+1, ans, y)
sc.ReverseRow(ans, y)
}
}, 0, dstH); err != nil {
}, 0, h); err != nil {
panic(err)
}
return dst
return
}

// Rotate270 rotates the image 270 degrees counter-clockwise and returns the transformed image.
func Rotate270(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
func Rotate270(img image.Image) (ans image.Image) {
sc := ScannerForImage(img)
b := img.Bounds()
w, h := b.Dx(), b.Dy()
ans = sc.NewImage(swap_width_height(b))
if err := run_in_parallel_over_range(0, func(start, limit int) {
for dstY := start; dstY < limit; dstY++ {
i := dstY * dst.Stride
srcX := dstY
src.Scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
for y := start; y < limit; y++ {
sc.ScanRow(y, 0, y+1, h, ans, y)
sc.ReverseRow(ans, y)
}
}, 0, dstH); err != nil {
}, 0, w); err != nil {
panic(err)
}
return dst
return
}

// Rotate rotates an image by the given angle counter-clockwise.
// The angle parameter is the rotation angle in degrees.
// The bgColor parameter specifies the color of the uncovered zone after the rotation.
func Rotate(img image.Image, angle float64, bgColor color.Color) *image.NRGBA {
func Rotate(img image.Image, angle float64, bgColor color.Color) image.Image {
angle = angle - math.Floor(angle/360)*360

switch angle {
case 0:
return Clone(img)
return ClonePreservingType(img)
case 90:
return Rotate90(img)
case 180:
106
vendor/github.com/kovidgoyal/imaging/types/types.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
package types

import (
"encoding/json"
"fmt"
"image"
"io"
)

var _ = fmt.Print

// Format is an image file format.
type Format int

type TransformType int

const (
NoTransform TransformType = iota
FlipHTransform
FlipVTransform
Rotate90Transform
Rotate180Transform
Rotate270Transform
TransverseTransform
TransposeTransform
)

// Image file formats.
const (
UNKNOWN Format = iota
JPEG
PNG
GIF
TIFF
WEBP
BMP
PBM
PGM
PPM
PAM
)

var FormatExts = map[string]Format{
"jpg": JPEG,
"jpeg": JPEG,
"png": PNG,
"gif": GIF,
"tif": TIFF,
"tiff": TIFF,
"webp": WEBP,
"bmp": BMP,
"pbm": PBM,
"pgm": PGM,
"ppm": PPM,
"pam": PAM,
}

var formatNames = map[Format]string{
JPEG: "JPEG",
PNG: "PNG",
GIF: "GIF",
TIFF: "TIFF",
WEBP: "WEBP",
BMP: "BMP",
PBM: "PBM",
PGM: "PGM",
PPM: "PPM",
PAM: "PAM",
}

func (f Format) String() string {
return formatNames[f]
}

func (s Format) MarshalJSON() ([]byte, error) {
return json.Marshal(s.String())
}

func (s *Format) UnmarshalJSON(data []byte) error {
var statusString string
if err := json.Unmarshal(data, &statusString); err != nil {
return err
}
for x, ss := range formatNames {
if ss == statusString {
*s = x
return nil
}
}
return fmt.Errorf("unknown image format: %s", statusString)
}
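Illustrative aside, not part of the vendored diff: a sketch of how the Format type, FormatExts map, and the JSON methods above fit together. The import path is assumed from the vendor directory layout.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/kovidgoyal/imaging/types"
)

func main() {
	// Map a file extension to a Format, then round-trip it through JSON
	// using the MarshalJSON/UnmarshalJSON methods defined above.
	f := types.FormatExts["webp"]
	data, _ := json.Marshal(f) // "WEBP"
	var back types.Format
	_ = json.Unmarshal(data, &back)
	fmt.Println(f, back == f) // WEBP true
}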

type Scanner interface {
Scan(x1, y1, x2, y2 int, dst []uint8)
ScanRow(x1, y1, x2, y2 int, img image.Image, row int)
Bytes_per_channel() int
Num_of_channels() int
Bounds() image.Rectangle
ReverseRow(image.Image, int)
NewImage(r image.Rectangle) image.Image
}

type Input struct {
Reader io.Reader
Path string
}
18
vendor/github.com/kovidgoyal/imaging/utils.go
generated
vendored
@@ -49,24 +49,6 @@ func clamp(x float64) uint8 {
return 0
}

func reverse(pix []uint8) {
if len(pix) <= 4 {
return
}
i := 0
j := len(pix) - 4
for i < j {
pi := pix[i : i+4 : i+4]
pj := pix[j : j+4 : j+4]
pi[0], pj[0] = pj[0], pi[0]
pi[1], pj[1] = pj[1], pi[1]
pi[2], pj[2] = pj[2], pi[2]
pi[3], pj[3] = pj[3], pi[3]
i += 4
j -= 4
}
}

func toNRGBA(img image.Image) *image.NRGBA {
if img, ok := img.(*image.NRGBA); ok {
return &image.NRGBA{
209
vendor/github.com/kovidgoyal/imaging/webp/animated.go
generated
vendored
Normal file
@@ -0,0 +1,209 @@
package webp

import (
"bytes"
"errors"
"image"
"io"

"golang.org/x/image/riff"
"golang.org/x/image/vp8"
"golang.org/x/image/vp8l"
)

var (
errNotExtended = errors.New("there was no vp8x header in this webp file, it cannot be animated")
errNotAnimated = errors.New("the vp8x header did not have the animation bit set")
)

func decodeAnimated(r io.Reader) (*AnimatedWEBP, error) {
riffReader, err := webpRiffReader(r)
if err != nil {
return nil, err
}

vp8xHeader, err := validateVP8XHeader(riffReader)
if err != nil {
return nil, err
}

animHeader, err := validateANIMHeader(riffReader)
if err != nil {
return nil, err
}
awp := AnimatedWEBP{
Frames: make([]Frame, 0, 128),
Header: animHeader,
Config: image.Config{
ColorModel: nil, // TODO(patricsss) set the color model correctly
Width: int(vp8xHeader.CanvasWidth),
Height: int(vp8xHeader.CanvasHeight),
},
}

for {
frame, err := parseFrame(riffReader)
if err != nil {
if err == io.EOF {
break
}
return nil, err
}

awp.Frames = append(awp.Frames, *frame)
}

return &awp, nil
}
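Illustrative aside, not part of the vendored diff: a minimal sketch of consuming the animated decoder above through the exported webp.DecodeAnimated wrapper defined later in this package. The import path is assumed from the vendor layout and the file name is a placeholder.

package main

import (
	"fmt"
	"os"

	"github.com/kovidgoyal/imaging/webp"
)

func main() {
	f, err := os.Open("animation.webp") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	anim, err := webp.DecodeAnimated(f)
	if err != nil {
		panic(err)
	}
	// Canvas-level metadata comes from the VP8X/ANIM headers, per-frame data from ANMF chunks.
	fmt.Println("canvas:", anim.Config.Width, "x", anim.Config.Height, "loops:", anim.Header.LoopCount)
	for i, fr := range anim.Frames {
		fmt.Println("frame", i, "duration(ms):", fr.Header.FrameDuration, "bounds:", fr.Frame.Bounds())
	}
}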

func validateVP8XHeader(r *riff.Reader) (VP8XHeader, error) {
fourCC, chunkLen, chunkData, err := r.Next()
if err != nil {
return VP8XHeader{}, err
}
if fourCC != fccVP8X {
return VP8XHeader{}, errNotExtended
}
if chunkLen != 10 {
return VP8XHeader{}, errInvalidFormat
}

h := parseVP8XHeader(chunkData)
if !h.Animation {
return VP8XHeader{}, errNotAnimated
}

return h, nil
}

func validateANIMHeader(r *riff.Reader) (ANIMHeader, error) {
fourCC, chunkLen, chunkData, err := r.Next()
if err != nil {
return ANIMHeader{}, err
}
if fourCC != fccANIM {
return ANIMHeader{}, errInvalidFormat
}
if chunkLen != 6 {
return ANIMHeader{}, errInvalidFormat
}

h := parseANIMHeader(chunkData)

return h, nil
}

func parseFrame(r *riff.Reader) (*Frame, error) {
fourCC, chunkLen, chunkData, err := r.Next()
if err != nil {
return nil, err
}
if fourCC != fccANMF {
return nil, errInvalidFormat
}

anmfHeader := parseANMFHeader(chunkData)

// buffer chunk data based on chunkLen for safety
// TODO(patricsss): establish if this is necessary, perhaps chunkData has a bounds
// ANMF headers are 16 bytes
wrappedChunkData, err := rewrap(chunkData, int(chunkLen-16))
if err != nil {
return nil, err
}
subReader := NewSubChunkReader(wrappedChunkData)

var (
alpha []byte
stride int
i *image.YCbCr
)

subFourCC, subChunkData, subChunkLen, err := subReader.Next()
if subFourCC == fccALPH {
alpha, stride, err = decodeAlpha(subChunkData, int(subChunkLen), anmfHeader)
if err != nil {
return nil, err
}
// read next chunk
subFourCC, subChunkData, subChunkLen, err = subReader.Next()
if err != nil {
return nil, err
}
}
var out image.Image
switch subFourCC {
case fccVP8:
i, err = decodeVp8Bitstream(subChunkData, int(subChunkLen))
if err != nil {
return nil, err
}
if len(alpha) > 0 {
out = &image.NYCbCrA{
YCbCr: *i,
A: alpha,
AStride: stride,
}
} else {
out = i
}
case fccVP8L:
out, err = vp8l.Decode(subChunkData)
if err != nil {
return nil, err
}
default:
return nil, errInvalidFormat
}

return &Frame{
Header: anmfHeader,
Frame: out,
}, nil
}

func decodeVp8Bitstream(chunkData io.Reader, chunkLen int) (*image.YCbCr, error) {
dec := vp8.NewDecoder()
dec.Init(chunkData, chunkLen)

_, err := dec.DecodeFrameHeader()
if err != nil {
return nil, err
}

i, err := dec.DecodeFrame()
if err != nil {
return nil, err
}

return i, nil
}

func decodeAlpha(chunkData io.Reader, chunkLen int, h ANMFHeader) (alpha []byte, alphaStride int, err error) {
alphHeader := parseALPHHeader(chunkData)
// Length of the chunk minus 1 byte for the ALPH header
buf := make([]byte, chunkLen-1)
n, err := io.ReadFull(chunkData, buf)
if err != nil {
return nil, 0, err
}
if n != len(buf) {
return nil, 0, errInvalidFormat
}

alpha, alphaStride, err = readAlpha(bytes.NewReader(buf), h.FrameWidth-1, h.FrameHeight-1, alphHeader.Compression)
unfilterAlpha(alpha, alphaStride, alphHeader.FilteringMethod)
return alpha, alphaStride, nil
}

func rewrap(r io.Reader, length int) (io.Reader, error) {
data := make([]byte, length)
n, err := io.ReadFull(r, data)
if err != nil {
return nil, err
}
if n != length {
return nil, errInvalidFormat
}
return bytes.NewReader(data), nil
}
302
vendor/github.com/kovidgoyal/imaging/webp/decode.go
generated
vendored
Normal file
@@ -0,0 +1,302 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package webp

import (
"bytes"
"errors"
"image"
"image/color"
"io"

"golang.org/x/image/riff"
"golang.org/x/image/vp8"
"golang.org/x/image/vp8l"
)

var errInvalidFormat = errors.New("webp: invalid format")

var (
fccANIM = riff.FourCC{'A', 'N', 'I', 'M'}
fccANMF = riff.FourCC{'A', 'N', 'M', 'F'}
fccALPH = riff.FourCC{'A', 'L', 'P', 'H'}
fccVP8 = riff.FourCC{'V', 'P', '8', ' '}
fccVP8L = riff.FourCC{'V', 'P', '8', 'L'}
fccVP8X = riff.FourCC{'V', 'P', '8', 'X'}
fccWEBP = riff.FourCC{'W', 'E', 'B', 'P'}
)

func webpRiffReader(r io.Reader) (*riff.Reader, error) {
formType, riffReader, err := riff.NewReader(r)
if err != nil {
return nil, err
}
if formType != fccWEBP {
return nil, errInvalidFormat
}
return riffReader, nil
}

func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) {
riffReader, err := webpRiffReader(r)
if err != nil {
return nil, image.Config{}, err
}

var (
alpha []byte
alphaStride int
wantAlpha bool
seenVP8X bool
widthMinusOne uint32
heightMinusOne uint32
buf [10]byte
)
for {
chunkID, chunkLen, chunkData, err := riffReader.Next()
if err == io.EOF {
err = errInvalidFormat
}
if err != nil {
return nil, image.Config{}, err
}

switch chunkID {
case fccALPH:
if !wantAlpha {
return nil, image.Config{}, errInvalidFormat
}
wantAlpha = false
// Read the Pre-processing | Filter | Compression byte.
if _, err := io.ReadFull(chunkData, buf[:1]); err != nil {
if err == io.EOF {
err = errInvalidFormat
}
return nil, image.Config{}, err
}
alpha, alphaStride, err = readAlpha(chunkData, widthMinusOne, heightMinusOne, buf[0]&0x03)
if err != nil {
return nil, image.Config{}, err
}
unfilterAlpha(alpha, alphaStride, (buf[0]>>2)&0x03)

case fccVP8:
if wantAlpha || int32(chunkLen) < 0 {
return nil, image.Config{}, errInvalidFormat
}
d := vp8.NewDecoder()
d.Init(chunkData, int(chunkLen))
fh, err := d.DecodeFrameHeader()
if err != nil {
return nil, image.Config{}, err
}
if configOnly {
return nil, image.Config{
ColorModel: color.YCbCrModel,
Width: fh.Width,
Height: fh.Height,
}, nil
}
m, err := d.DecodeFrame()
if err != nil {
return nil, image.Config{}, err
}
if alpha != nil {
return &image.NYCbCrA{
YCbCr: *m,
A: alpha,
AStride: alphaStride,
}, image.Config{}, nil
}
return m, image.Config{}, nil

case fccVP8L:
if wantAlpha || alpha != nil {
return nil, image.Config{}, errInvalidFormat
}
if configOnly {
c, err := vp8l.DecodeConfig(chunkData)
return nil, c, err
}
m, err := vp8l.Decode(chunkData)
return m, image.Config{}, err

case fccVP8X:
if seenVP8X {
return nil, image.Config{}, errInvalidFormat
}
seenVP8X = true
if chunkLen != 10 {
return nil, image.Config{}, errInvalidFormat
}
h := parseVP8XHeader(chunkData)
wantAlpha = h.Alpha
widthMinusOne = h.CanvasWidth - 1
heightMinusOne = h.CanvasHeight - 1
if configOnly {
if wantAlpha {
return nil, image.Config{
ColorModel: color.NYCbCrAModel,
Width: int(widthMinusOne) + 1,
Height: int(heightMinusOne) + 1,
}, nil
}
return nil, image.Config{
ColorModel: color.YCbCrModel,
Width: int(widthMinusOne) + 1,
Height: int(heightMinusOne) + 1,
}, nil
}
}
}
}

func readAlpha(chunkData io.Reader, widthMinusOne, heightMinusOne uint32, compression byte) (
alpha []byte, alphaStride int, err error) {

switch compression {
case 0:
w := int(widthMinusOne) + 1
h := int(heightMinusOne) + 1
alpha = make([]byte, w*h)
if _, err := io.ReadFull(chunkData, alpha); err != nil {
return nil, 0, err
}
return alpha, w, nil

case 1:
// Read the VP8L-compressed alpha values. First, synthesize a 5-byte VP8L header:
// a 1-byte magic number, a 14-bit widthMinusOne, a 14-bit heightMinusOne,
// a 1-bit (ignored, zero) alphaIsUsed and a 3-bit (zero) version.
// TODO(nigeltao): be more efficient than decoding an *image.NRGBA just to
// extract the green values to a separately allocated []byte. Fixing this
// will require changes to the vp8l package's API.
if widthMinusOne > 0x3fff || heightMinusOne > 0x3fff {
return nil, 0, errors.New("webp: invalid format")
}
alphaImage, err := vp8l.Decode(io.MultiReader(
bytes.NewReader([]byte{
0x2f, // VP8L magic number.
uint8(widthMinusOne),
uint8(widthMinusOne>>8) | uint8(heightMinusOne<<6),
uint8(heightMinusOne >> 2),
uint8(heightMinusOne >> 10),
}),
chunkData,
))
if err != nil {
return nil, 0, err
}
// The green values of the inner NRGBA image are the alpha values of the
// outer NYCbCrA image.
pix := alphaImage.(*image.NRGBA).Pix
alpha = make([]byte, len(pix)/4)
for i := range alpha {
alpha[i] = pix[4*i+1]
}
return alpha, int(widthMinusOne) + 1, nil
}
return nil, 0, errInvalidFormat
}
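Illustrative aside, not part of the vendored diff: a worked example of the 5-byte VP8L header that the case 1 branch above synthesizes, assuming a 256x256 alpha plane (widthMinusOne and heightMinusOne both 255).

package main

import "fmt"

func main() {
	// Same bit packing as the readAlpha case 1 branch above.
	var widthMinusOne, heightMinusOne uint32 = 255, 255
	hdr := []byte{
		0x2f, // VP8L magic number.
		uint8(widthMinusOne),                               // 0xff: low 8 bits of width-1
		uint8(widthMinusOne>>8) | uint8(heightMinusOne<<6), // 0xc0: high bits of width-1, low 2 bits of height-1
		uint8(heightMinusOne >> 2),                         // 0x3f: next 8 bits of height-1
		uint8(heightMinusOne >> 10),                        // 0x00: remaining height bits, alpha/version bits zero
	}
	fmt.Printf("% x\n", hdr) // 2f ff c0 3f 00
}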

func unfilterAlpha(alpha []byte, alphaStride int, filter byte) {
if len(alpha) == 0 || alphaStride == 0 {
return
}
switch filter {
case 1: // Horizontal filter.
for i := 1; i < alphaStride; i++ {
alpha[i] += alpha[i-1]
}
for i := alphaStride; i < len(alpha); i += alphaStride {
// The first column is equivalent to the vertical filter.
alpha[i] += alpha[i-alphaStride]

for j := 1; j < alphaStride; j++ {
alpha[i+j] += alpha[i+j-1]
}
}

case 2: // Vertical filter.
// The first row is equivalent to the horizontal filter.
for i := 1; i < alphaStride; i++ {
alpha[i] += alpha[i-1]
}

for i := alphaStride; i < len(alpha); i++ {
alpha[i] += alpha[i-alphaStride]
}

case 3: // Gradient filter.
// The first row is equivalent to the horizontal filter.
for i := 1; i < alphaStride; i++ {
alpha[i] += alpha[i-1]
}

for i := alphaStride; i < len(alpha); i += alphaStride {
// The first column is equivalent to the vertical filter.
alpha[i] += alpha[i-alphaStride]

// The interior is predicted on the three top/left pixels.
for j := 1; j < alphaStride; j++ {
c := int(alpha[i+j-alphaStride-1])
b := int(alpha[i+j-alphaStride])
a := int(alpha[i+j-1])
x := a + b - c
if x < 0 {
x = 0
} else if x > 255 {
x = 255
}
alpha[i+j] += uint8(x)
}
}
}
}
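Illustrative aside, not part of the vendored diff: a tiny worked example of the clamped gradient predictor used by filter 3 above (predict clamp(left + top - topleft), then add the stored delta).

package main

import "fmt"

func main() {
	// left=100, top=90, topleft=120 -> predictor is a+b-c = 70, already in range.
	a, b, c := 100, 90, 120
	x := a + b - c
	if x < 0 {
		x = 0
	} else if x > 255 {
		x = 255
	}
	delta := uint8(5) // value stored in the filtered alpha plane
	fmt.Println(uint8(x) + delta) // 75
}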

// Decode reads a WEBP image from r and returns it as an image.Image.
func Decode(r io.Reader) (image.Image, error) {
m, _, err := decode(r, false)
if err != nil {
return nil, err
}
return m, nil
}

func DecodeAnimated(r io.Reader) (*AnimatedWEBP, error) {
return decodeAnimated(r)
}

// DecodeConfig returns the color model and dimensions of a WEBP image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
_, c, err := decode(r, true)
return c, err
}

// DecodeVP8XHeader will return the decoded VP8XHeader if this file is in the Extended File Format
// as defined by the webp specification. The VP8X chunk must be the first chunk of the file.
// If the first chunk of the file is anything else, it is not in the extended format and this
// will return a nil VP8XHeader. An error is returned only if the chunk is found but invalid,
// or if a generic io.Reader error occurs.
func DecodeVP8XHeader(r io.Reader) (*VP8XHeader, error) {
riffReader, err := webpRiffReader(r)
if err != nil {
return nil, err
}
fourCC, chunkLen, chunkData, err := riffReader.Next()
if err != nil {
return nil, err
}
if fourCC != fccVP8X {
return nil, nil
}
if chunkLen != 10 {
return nil, errInvalidFormat
}

h := parseVP8XHeader(chunkData)
return &h, nil
}
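Illustrative aside, not part of the vendored diff: a minimal usage sketch of the exported Decode and DecodeConfig entry points above. The import path is assumed from the vendor layout and the file name is a placeholder.

package main

import (
	"fmt"
	"os"

	"github.com/kovidgoyal/imaging/webp"
)

func main() {
	f, err := os.Open("picture.webp") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Cheap probe first: DecodeConfig reads only enough to report size and color model.
	cfg, err := webp.DecodeConfig(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("size:", cfg.Width, "x", cfg.Height)

	// Rewind and decode the pixels with Decode.
	if _, err := f.Seek(0, 0); err != nil {
		panic(err)
	}
	img, err := webp.Decode(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("bounds:", img.Bounds())
}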
277
vendor/github.com/kovidgoyal/imaging/webp/decode.go.orig
generated
vendored
Normal file
@@ -0,0 +1,277 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package webp

import (
"bytes"
"errors"
"image"
"image/color"
"io"

"golang.org/x/image/riff"
"golang.org/x/image/vp8"
"golang.org/x/image/vp8l"
)

var errInvalidFormat = errors.New("webp: invalid format")

var (
fccALPH = riff.FourCC{'A', 'L', 'P', 'H'}
fccVP8 = riff.FourCC{'V', 'P', '8', ' '}
fccVP8L = riff.FourCC{'V', 'P', '8', 'L'}
fccVP8X = riff.FourCC{'V', 'P', '8', 'X'}
fccWEBP = riff.FourCC{'W', 'E', 'B', 'P'}
)

func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) {
formType, riffReader, err := riff.NewReader(r)
if err != nil {
return nil, image.Config{}, err
}
if formType != fccWEBP {
return nil, image.Config{}, errInvalidFormat
}

var (
alpha []byte
alphaStride int
wantAlpha bool
seenVP8X bool
widthMinusOne uint32
heightMinusOne uint32
buf [10]byte
)
for {
chunkID, chunkLen, chunkData, err := riffReader.Next()
if err == io.EOF {
err = errInvalidFormat
}
if err != nil {
return nil, image.Config{}, err
}

switch chunkID {
case fccALPH:
if !wantAlpha {
return nil, image.Config{}, errInvalidFormat
}
wantAlpha = false
// Read the Pre-processing | Filter | Compression byte.
if _, err := io.ReadFull(chunkData, buf[:1]); err != nil {
if err == io.EOF {
err = errInvalidFormat
}
return nil, image.Config{}, err
}
alpha, alphaStride, err = readAlpha(chunkData, widthMinusOne, heightMinusOne, buf[0]&0x03)
if err != nil {
return nil, image.Config{}, err
}
unfilterAlpha(alpha, alphaStride, (buf[0]>>2)&0x03)

case fccVP8:
if wantAlpha || int32(chunkLen) < 0 {
return nil, image.Config{}, errInvalidFormat
}
d := vp8.NewDecoder()
d.Init(chunkData, int(chunkLen))
fh, err := d.DecodeFrameHeader()
if err != nil {
return nil, image.Config{}, err
}
if configOnly {
return nil, image.Config{
ColorModel: color.YCbCrModel,
Width: fh.Width,
Height: fh.Height,
}, nil
}
m, err := d.DecodeFrame()
if err != nil {
return nil, image.Config{}, err
}
if alpha != nil {
return &image.NYCbCrA{
YCbCr: *m,
A: alpha,
AStride: alphaStride,
}, image.Config{}, nil
}
return m, image.Config{}, nil

case fccVP8L:
if wantAlpha || alpha != nil {
return nil, image.Config{}, errInvalidFormat
}
if configOnly {
c, err := vp8l.DecodeConfig(chunkData)
return nil, c, err
}
m, err := vp8l.Decode(chunkData)
return m, image.Config{}, err

case fccVP8X:
if seenVP8X {
return nil, image.Config{}, errInvalidFormat
}
seenVP8X = true
if chunkLen != 10 {
return nil, image.Config{}, errInvalidFormat
}
if _, err := io.ReadFull(chunkData, buf[:10]); err != nil {
return nil, image.Config{}, err
}
const (
animationBit = 1 << 1
xmpMetadataBit = 1 << 2
exifMetadataBit = 1 << 3
alphaBit = 1 << 4
iccProfileBit = 1 << 5
)
wantAlpha = (buf[0] & alphaBit) != 0
widthMinusOne = uint32(buf[4]) | uint32(buf[5])<<8 | uint32(buf[6])<<16
heightMinusOne = uint32(buf[7]) | uint32(buf[8])<<8 | uint32(buf[9])<<16
if configOnly {
if wantAlpha {
return nil, image.Config{
ColorModel: color.NYCbCrAModel,
Width: int(widthMinusOne) + 1,
Height: int(heightMinusOne) + 1,
}, nil
}
return nil, image.Config{
ColorModel: color.YCbCrModel,
Width: int(widthMinusOne) + 1,
Height: int(heightMinusOne) + 1,
}, nil
}
}
}
}

func readAlpha(chunkData io.Reader, widthMinusOne, heightMinusOne uint32, compression byte) (
alpha []byte, alphaStride int, err error) {

switch compression {
case 0:
w := int(widthMinusOne) + 1
h := int(heightMinusOne) + 1
alpha = make([]byte, w*h)
if _, err := io.ReadFull(chunkData, alpha); err != nil {
return nil, 0, err
}
return alpha, w, nil

case 1:
// Read the VP8L-compressed alpha values. First, synthesize a 5-byte VP8L header:
// a 1-byte magic number, a 14-bit widthMinusOne, a 14-bit heightMinusOne,
// a 1-bit (ignored, zero) alphaIsUsed and a 3-bit (zero) version.
// TODO(nigeltao): be more efficient than decoding an *image.NRGBA just to
// extract the green values to a separately allocated []byte. Fixing this
// will require changes to the vp8l package's API.
if widthMinusOne > 0x3fff || heightMinusOne > 0x3fff {
return nil, 0, errors.New("webp: invalid format")
}
alphaImage, err := vp8l.Decode(io.MultiReader(
bytes.NewReader([]byte{
0x2f, // VP8L magic number.
uint8(widthMinusOne),
uint8(widthMinusOne>>8) | uint8(heightMinusOne<<6),
uint8(heightMinusOne >> 2),
uint8(heightMinusOne >> 10),
}),
chunkData,
))
if err != nil {
return nil, 0, err
}
// The green values of the inner NRGBA image are the alpha values of the
// outer NYCbCrA image.
pix := alphaImage.(*image.NRGBA).Pix
alpha = make([]byte, len(pix)/4)
for i := range alpha {
alpha[i] = pix[4*i+1]
}
return alpha, int(widthMinusOne) + 1, nil
}
return nil, 0, errInvalidFormat
}

func unfilterAlpha(alpha []byte, alphaStride int, filter byte) {
if len(alpha) == 0 || alphaStride == 0 {
return
}
switch filter {
case 1: // Horizontal filter.
for i := 1; i < alphaStride; i++ {
alpha[i] += alpha[i-1]
}
for i := alphaStride; i < len(alpha); i += alphaStride {
// The first column is equivalent to the vertical filter.
alpha[i] += alpha[i-alphaStride]

for j := 1; j < alphaStride; j++ {
alpha[i+j] += alpha[i+j-1]
}
}

case 2: // Vertical filter.
// The first row is equivalent to the horizontal filter.
for i := 1; i < alphaStride; i++ {
alpha[i] += alpha[i-1]
}

for i := alphaStride; i < len(alpha); i++ {
alpha[i] += alpha[i-alphaStride]
}

case 3: // Gradient filter.
// The first row is equivalent to the horizontal filter.
for i := 1; i < alphaStride; i++ {
alpha[i] += alpha[i-1]
}

for i := alphaStride; i < len(alpha); i += alphaStride {
// The first column is equivalent to the vertical filter.
alpha[i] += alpha[i-alphaStride]

// The interior is predicted on the three top/left pixels.
for j := 1; j < alphaStride; j++ {
c := int(alpha[i+j-alphaStride-1])
b := int(alpha[i+j-alphaStride])
a := int(alpha[i+j-1])
x := a + b - c
if x < 0 {
x = 0
} else if x > 255 {
x = 255
}
alpha[i+j] += uint8(x)
}
}
}
}

// Decode reads a WEBP image from r and returns it as an image.Image.
func Decode(r io.Reader) (image.Image, error) {
m, _, err := decode(r, false)
if err != nil {
return nil, err
}
return m, nil
}

// DecodeConfig returns the color model and dimensions of a WEBP image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
_, c, err := decode(r, true)
return c, err
}

func init() {
image.RegisterFormat("webp", "RIFF????WEBPVP8", Decode, DecodeConfig)
}
73
vendor/github.com/kovidgoyal/imaging/webp/subchunkreader.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
package webp

import (
"bytes"
"errors"
"io"

"golang.org/x/image/riff"
)

var (
errInvalidHeader = errors.New("could not read an 8 byte header, sub-chunk is not valid")
)

// SubChunkReader helps in reading riff data from an existing chunk that is composed of sub-chunks.
// A good example would be ANMF chunks of animated webp files. These chunks can contain headers, ALPH chunks
// and VP8 or VP8L chunks within the main riff data chunk.
type SubChunkReader struct {
r io.Reader
}

// Next will return the FourCC, data, and data length of a subchunk.
// The io.Reader returned for data will not be the same as the provided reader
// and is safe to discard without fully reading the contents.
// Will return an error if the format is invalid or an underlying read operation fails.
func (c SubChunkReader) Next() (riff.FourCC, io.Reader, uint32, error) {
header := make([]byte, 8)
n, err := io.ReadFull(c.r, header)
if err != nil {
if err == io.ErrUnexpectedEOF {
return riff.FourCC{}, nil, 0, errInvalidHeader
}
return riff.FourCC{}, nil, 0, err
}
if n != 8 {
return riff.FourCC{}, nil, 0, errInvalidHeader
}

fourCC := riff.FourCC{header[0], header[1], header[2], header[3]}
chunkLen := u32(header[4:8])
buf := make([]byte, chunkLen)
n, err = io.ReadFull(c.r, buf)
if err != nil {
if err == io.ErrUnexpectedEOF {
return riff.FourCC{}, nil, 0, errInvalidFormat
}
return riff.FourCC{}, nil, 0, err
}
if n != int(chunkLen) {
return riff.FourCC{}, nil, 0, errInvalidFormat
}

// if chunkLen was odd, we need to maintain a 2-byte boundary per RIFF spec.
// in this case read off a single byte of padding to re-align with the next
// fourCC header
if chunkLen%2 == 1 {
n, err := c.r.Read([]byte{0})
if err != nil {
return riff.FourCC{}, nil, 0, err
}
if n != 1 {
return riff.FourCC{}, nil, 0, errInvalidFormat
}
}

return fourCC, bytes.NewReader(buf), chunkLen, nil
}

func NewSubChunkReader(r io.Reader) *SubChunkReader {
return &SubChunkReader{
r: r,
}
}
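Illustrative aside, not part of the vendored diff: a minimal sketch of walking sub-chunks with the SubChunkReader defined above. The raw bytes are a hypothetical chunk payload, not real WebP data, and the import path is assumed from the vendor layout.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/kovidgoyal/imaging/webp"
)

func main() {
	// Two fake sub-chunks: "ABCD" with 2 payload bytes and "EFGH" with 4.
	// Each starts with a FourCC and a little-endian uint32 length; odd lengths
	// would be followed by one padding byte, as handled in Next above.
	payload := []byte{
		'A', 'B', 'C', 'D', 2, 0, 0, 0, 0x01, 0x02,
		'E', 'F', 'G', 'H', 4, 0, 0, 0, 0x0a, 0x0b, 0x0c, 0x0d,
	}
	r := webp.NewSubChunkReader(bytes.NewReader(payload))
	for {
		fourCC, data, n, err := r.Next()
		if err != nil {
			break // a truncated header or io error ends the walk
		}
		body, _ := io.ReadAll(data)
		fmt.Println(string(fourCC[:]), n, body)
	}
}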
138
vendor/github.com/kovidgoyal/imaging/webp/webp.go
generated
vendored
Normal file
@@ -0,0 +1,138 @@
package webp

import (
"image"
"image/color"
"io"
)

// AnimatedWEBP is the struct of an AnimatedWEBP container and the image data contained within.
type AnimatedWEBP struct {
Frames []Frame
Header ANIMHeader
Config image.Config
}

type Frame struct {
Header ANMFHeader
Frame image.Image
}

type VP8XHeader struct {
ICCProfile bool
Alpha bool
ExifMetadata bool
XmpMetadata bool
Animation bool
CanvasWidth uint32
CanvasHeight uint32
}

type ALPHHeader struct {
Preprocessing uint8
FilteringMethod uint8
Compression uint8
}

type ANIMHeader struct {
BackgroundColor color.Color
LoopCount uint16
}

type ANMFHeader struct {
FrameX uint32
FrameY uint32
FrameWidth uint32
FrameHeight uint32
FrameDuration uint32
AlphaBlend bool
DisposalBitSet bool
}

func parseALPHHeader(r io.Reader) ALPHHeader {
h := make([]byte, 1)
_, _ = io.ReadFull(r, h)

const (
twoBits = byte(3)
)

return ALPHHeader{
Preprocessing: h[0] >> 4 & twoBits,
FilteringMethod: h[0] >> 2 & twoBits,
Compression: h[0] & twoBits,
}
}

func parseANIMHeader(r io.Reader) ANIMHeader {
h := make([]byte, 6)
_, _ = io.ReadFull(r, h)

loopCount := uint16(h[4]) | uint16(h[5])<<8
bg := color.RGBA{
R: h[2],
G: h[1],
B: h[0],
A: h[3],
}

return ANIMHeader{
BackgroundColor: bg,
LoopCount: loopCount,
}
}

func parseANMFHeader(r io.Reader) ANMFHeader {
h := make([]byte, 16)
_, _ = io.ReadFull(r, h)

const (
disposeBit = 1
blendBit = 1 << 1
)

return ANMFHeader{
FrameX: u24(h[0:3]),
FrameY: u24(h[3:6]),
FrameWidth: u24(h[6:9]) + 1,
FrameHeight: u24(h[9:12]) + 1,
FrameDuration: u24(h[12:15]),
AlphaBlend: (h[15] & blendBit) == 0,
DisposalBitSet: (h[15] & disposeBit) != 0,
}
}

func parseVP8XHeader(r io.Reader) VP8XHeader {
const (
anim = 1 << 1
xmp = 1 << 2
exif = 1 << 3
alpha = 1 << 4
icc = 1 << 5
)

h := make([]byte, 10)
_, _ = io.ReadFull(r, h)

widthMinusOne := u24(h[4:])
heightMinusOne := u24(h[7:])

header := VP8XHeader{
ICCProfile: h[0]&icc != 0,
Alpha: h[0]&alpha != 0,
ExifMetadata: h[0]&exif != 0,
XmpMetadata: h[0]&xmp != 0,
Animation: h[0]&anim != 0,
CanvasWidth: widthMinusOne + 1,
CanvasHeight: heightMinusOne + 1,
}
return header
}

func u24(b []byte) uint32 {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
}

func u32(b []byte) uint32 {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
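Illustrative aside, not part of the vendored diff: the helpers above read little-endian 24-bit and 32-bit values, as this small self-contained check shows (the helper is re-declared locally for the example).

package main

import "fmt"

func u24(b []byte) uint32 {
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
}

func main() {
	// Little-endian: the last byte is the most significant.
	fmt.Printf("%#x\n", u24([]byte{0x01, 0x02, 0x03})) // 0x30201
}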