Dataset schema; each row pairs issue metadata with one `chunk_content` slice of an updated file:

| column | dtype | values / range |
|---|---|---|
| status | stringclasses | 1 value |
| repo_name | stringclasses | 31 values |
| repo_url | stringclasses | 31 values |
| issue_id | int64 | 1 to 104k |
| title | stringlengths | 4 to 233 |
| body | stringlengths | 0 to 186k |
| issue_url | stringlengths | 38 to 56 |
| pull_url | stringlengths | 37 to 54 |
| before_fix_sha | stringlengths | 40 to 40 |
| after_fix_sha | stringlengths | 40 to 40 |
| report_datetime | timestamp[us, tz=UTC] | |
| language | stringclasses | 5 values |
| commit_datetime | timestamp[us, tz=UTC] | |
| updated_file | stringlengths | 7 to 188 |
| chunk_content | stringlengths | 1 to 1.03M |
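For orientation, one record of this schema could be mirrored in Go roughly as below. This is a sketch for illustration only; the struct name, JSON tags, and the choice of `time.Time` for the timestamp columns are assumptions, not part of the dataset.

```go
package main

import (
	"fmt"
	"time"
)

// IssueFixChunk is a hypothetical Go mirror of one dataset record; field
// names follow the schema columns above, and the tags are assumed.
type IssueFixChunk struct {
	Status         string    `json:"status"`          // stringclasses, 1 value
	RepoName       string    `json:"repo_name"`       // stringclasses, 31 values
	RepoURL        string    `json:"repo_url"`        // stringclasses, 31 values
	IssueID        int64     `json:"issue_id"`        // 1 to 104k
	Title          string    `json:"title"`           // 4 to 233 chars
	Body           string    `json:"body"`            // 0 to 186k chars
	IssueURL       string    `json:"issue_url"`
	PullURL        string    `json:"pull_url"`
	BeforeFixSHA   string    `json:"before_fix_sha"`  // 40-char git SHA
	AfterFixSHA    string    `json:"after_fix_sha"`   // 40-char git SHA
	ReportDatetime time.Time `json:"report_datetime"` // timestamp[us, tz=UTC]
	Language       string    `json:"language"`        // stringclasses, 5 values
	CommitDatetime time.Time `json:"commit_datetime"` // timestamp[us, tz=UTC]
	UpdatedFile    string    `json:"updated_file"`
	ChunkContent   string    `json:"chunk_content"`   // 1 to 1.03M chars
}

func main() {
	// Zero-value record, just to show the shape.
	fmt.Printf("%+v\n", IssueFixChunk{})
}
```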
All rows below belong to the same record: one closed dagger/dagger issue, split across rows that differ only in `chunk_content`. The shared fields:

status: closed
repo_name: dagger/dagger
repo_url: https://github.com/dagger/dagger
issue_id: 3393
title: Fix build of shim when execing on non-platform specific llb state
issue_url: https://github.com/dagger/dagger/issues/3393
pull_url: https://github.com/dagger/dagger/pull/3399
before_fix_sha: 51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
after_fix_sha: f8b3c63637cfcca2ec677fea2e020885664bd301
report_datetime: 2022-10-14T22:29:08Z
language: go
commit_datetime: 2022-11-10T06:01:37Z
updated_file: core/integration/container_test.go

body (from @marcosnils):

```
query Build($file: FileID!, $ref: ContainerAddress!) {
  container {
    withMountedFile(path: "/mnt/dagger", source: $file) {
      exec(args: ["cp", "/mnt/dagger", "/dagger"]) {
        withEntrypoint(args: ["dagger"]) {
          publish(address: $ref)
        }
      }
    }
  }
}
```

results in

```
{
  "data": null,
  "errors": [
    {
      "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument",
      "locations": [
        {
          "line": 4,
          "column": 7
        }
      ],
      "path": [
        "container",
        "withMountedFile",
        "exec"
      ]
    }
  ]
}
```

The above query should fail, since there is no fs on which to execute `cp`, but it shouldn't fail by being unable to build the shim. It should also be possible in general to, e.g., start at scratch, unpack a static binary, and exec it, in which case this shouldn't even be an error case at all.
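The failure above happens because the shim build tries to parse a target platform from an LLB state that has none (scratch plus a mounted file). As a rough sketch of the fallback idea only, assuming containerd's platforms helpers; this is not the actual change in pull 3399:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// shimPlatform is a hypothetical helper: when the state carries no
// platform, fall back to the default platform instead of trying to
// parse the string "unknown".
func shimPlatform(statePlatform *specs.Platform) specs.Platform {
	if statePlatform == nil {
		return platforms.DefaultSpec()
	}
	return *statePlatform
}

func main() {
	// A platformless state still yields a buildable shim target.
	fmt.Println(platforms.Format(shimPlatform(nil)))
}
```

The real fix has to thread this decision through Dagger's exec path; the sketch only shows the defaulting step.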
chunk_content (consecutive slices of core/integration/container_test.go; one slice per row, shared fields as above):

```go
	})

	clobbered := replaced.WithMountedDirectory("/mnt", clobberedDir)

	t.Run("replacing parent of a mount clobbers child", func(t *testing.T) {
		mnts, err := clobbered.Mounts(ctx)
		require.NoError(t, err)
		require.Equal(t, []string{"/mnt"}, mnts)

		out, err := clobbered.Exec(dagger.ContainerExecOpts{
			Args: []string{"cat", "/mnt/some-file"},
		}).Stdout().Contents(ctx)
		require.NoError(t, err)
		require.Equal(t, "clobbered-content", out)
	})

	clobberedSubDir := c.Directory().WithNewFile("some-file", dagger.DirectoryWithNewFileOpts{
		Contents: "clobbered-sub-content",
	})

	clobberedSub := clobbered.WithMountedDirectory("/mnt/dir", clobberedSubDir)

	t.Run("restoring mount under clobbered mount", func(t *testing.T) {
		mnts, err := clobberedSub.Mounts(ctx)
		require.NoError(t, err)
		require.Equal(t, []string{"/mnt", "/mnt/dir"}, mnts)

		out, err := clobberedSub.Exec(dagger.ContainerExecOpts{
			Args: []string{"cat", "/mnt/dir/some-file"},
		}).Stdout().Contents(ctx)
		require.NoError(t, err)
		require.Equal(t, "clobbered-sub-content", out)
	})
}

func TestContainerDirectory(t *testing.T) {
	t.Parallel()

	dirRes := struct {
```
```go
		Directory struct {
			WithNewFile struct {
				WithNewFile struct {
					ID core.DirectoryID
				}
			}
		}
	}{}

	err := testutil.Query(
		`{
			directory {
				withNewFile(path: "some-file", contents: "some-content") {
					withNewFile(path: "some-dir/sub-file", contents: "sub-content") {
						id
					}
				}
			}
		}`, &dirRes, nil)
	require.NoError(t, err)

	id := dirRes.Directory.WithNewFile.WithNewFile.ID

	writeRes := struct {
		Container struct {
			From struct {
				WithMountedDirectory struct {
					WithMountedDirectory struct {
						Exec struct {
```
```go
							Directory struct {
								ID core.DirectoryID
							}
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						withMountedDirectory(path: "/mnt/dir/overlap", source: $id) {
							exec(args: ["sh", "-c", "echo hello >> /mnt/dir/overlap/another-file"]) {
								directory(path: "/mnt/dir/overlap") {
									id
								}
							}
						}
					}
				}
			}
		}`, &writeRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": id,
		}})
	require.NoError(t, err)

	writtenID := writeRes.Container.From.WithMountedDirectory.WithMountedDirectory.Exec.Directory.ID

	execRes := struct {
```
```go
		Container struct {
			From struct {
				WithMountedDirectory struct {
					Exec struct {
```
```go
						Stdout struct {
							Contents string
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						exec(args: ["cat", "/mnt/dir/another-file"]) {
							stdout {
								contents
							}
						}
					}
				}
			}
		}`, &execRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": writtenID,
		}})
	require.NoError(t, err)

	require.Equal(t, "hello\n", execRes.Container.From.WithMountedDirectory.Exec.Stdout.Contents)
}

func TestContainerDirectoryErrors(t *testing.T) {
	t.Parallel()

	dirRes := struct {
```
```go
		Directory struct {
			WithNewFile struct {
				WithNewFile struct {
					ID core.DirectoryID
				}
			}
		}
	}{}

	err := testutil.Query(
		`{
			directory {
				withNewFile(path: "some-file", contents: "some-content") {
					withNewFile(path: "some-dir/sub-file", contents: "sub-content") {
						id
					}
				}
			}
		}`, &dirRes, nil)
	require.NoError(t, err)

	id := dirRes.Directory.WithNewFile.WithNewFile.ID

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						directory(path: "/mnt/dir/some-file") {
							id
```
```go
						}
					}
				}
			}
		}`, nil, &testutil.QueryOptions{Variables: map[string]any{
			"id": id,
		}})
	require.Error(t, err)
	require.Contains(t, err.Error(), "path /mnt/dir/some-file is a file, not a directory")

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						directory(path: "/mnt/dir/bogus") {
							id
						}
					}
				}
			}
		}`, nil, &testutil.QueryOptions{Variables: map[string]any{
			"id": id,
		}})
	require.Error(t, err)
	require.Contains(t, err.Error(), "bogus: no such file or directory")

	err = testutil.Query(
		`{
			container {
				from(address: "alpine:3.16.2") {
					withMountedTemp(path: "/mnt/tmp") {
```
directory(path: "/mnt/tmp/bogus") { id } } } } }`, nil, nil) require.Error(t, err) require.Contains(t, err.Error(), "bogus: cannot retrieve path from tmpfs") cacheID := newCache(t) err = testutil.Query( `query Test($cache: CacheID!) { container { from(address: "alpine:3.16.2") { withMountedCache(path: "/mnt/cache", cache: $cache) { directory(path: "/mnt/cache/bogus") { id } } } } }`, nil, &testutil.QueryOptions{Variables: map[string]any{ "cache": cacheID, }}) require.Error(t, err) require.Contains(t, err.Error(), "bogus: cannot retrieve path from cache") } func TestContainerDirectorySourcePath(t *testing.T) { t.Parallel() dirRes := struct {
```go
		Directory struct {
			WithNewFile struct {
				Directory struct {
					ID core.DirectoryID
				}
			}
		}
	}{}

	err := testutil.Query(
		`{
			directory {
				withNewFile(path: "some-dir/sub-dir/sub-file", contents: "sub-content\n") {
					directory(path: "some-dir") {
						id
					}
				}
			}
		}`, &dirRes, nil)
	require.NoError(t, err)

	id := dirRes.Directory.WithNewFile.Directory.ID

	writeRes := struct {
		Container struct {
			From struct {
				WithMountedDirectory struct {
```
```go
					Exec struct {
						Directory struct {
							ID core.DirectoryID
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						exec(args: ["sh", "-c", "echo more-content >> /mnt/dir/sub-dir/sub-file"]) {
							directory(path: "/mnt/dir/sub-dir") {
								id
							}
						}
					}
				}
			}
		}`, &writeRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": id,
		}})
	require.NoError(t, err)

	writtenID := writeRes.Container.From.WithMountedDirectory.Exec.Directory.ID

	execRes := struct {
		Container struct {
			From struct {
```
```go
				WithMountedDirectory struct {
					Exec struct {
						Stdout struct {
							Contents string
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						exec(args: ["cat", "/mnt/dir/sub-file"]) {
							stdout {
								contents
							}
						}
					}
				}
			}
		}`, &execRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": writtenID,
		}})
	require.NoError(t, err)

	require.Equal(t, "sub-content\nmore-content\n", execRes.Container.From.WithMountedDirectory.Exec.Stdout.Contents)
}

func TestContainerFile(t *testing.T) {
```
```go
	t.Parallel()

	id := newDirWithFile(t, "some-file", "some-content-")

	writeRes := struct {
		Container struct {
			From struct {
				WithMountedDirectory struct {
					WithMountedDirectory struct {
						Exec struct {
```
```go
							File struct {
								ID core.FileID
							}
						}
					}
				}
			}
		}
	}{}

	err := testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						withMountedDirectory(path: "/mnt/dir/overlap", source: $id) {
							exec(args: ["sh", "-c", "echo -n appended >> /mnt/dir/overlap/some-file"]) {
								file(path: "/mnt/dir/overlap/some-file") {
									id
								}
							}
						}
					}
				}
			}
		}`, &writeRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": id,
		}})
	require.NoError(t, err)

	writtenID := writeRes.Container.From.WithMountedDirectory.WithMountedDirectory.Exec.File.ID

	execRes := struct {
```
```go
		Container struct {
			From struct {
```
```go
				WithMountedFile struct {
					Exec struct {
						Stdout struct {
							Contents string
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: FileID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedFile(path: "/mnt/file", source: $id) {
						exec(args: ["cat", "/mnt/file"]) {
							stdout {
								contents
							}
						}
					}
				}
			}
		}`, &execRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": writtenID,
		}})
	require.NoError(t, err)

	require.Equal(t, "some-content-appended", execRes.Container.From.WithMountedFile.Exec.Stdout.Contents)
}

func TestContainerFileErrors(t *testing.T) {
```
```go
	t.Parallel()

	id := newDirWithFile(t, "some-file", "some-content")

	err := testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						file(path: "/mnt/dir/bogus") {
							id
						}
					}
				}
			}
		}`, nil, &testutil.QueryOptions{Variables: map[string]any{
			"id": id,
		}})
	require.Error(t, err)
	require.Contains(t, err.Error(), "bogus: no such file or directory")

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						file(path: "/mnt/dir") {
							id
						}
```
```go
					}
				}
			}
		}`, nil, &testutil.QueryOptions{Variables: map[string]any{
			"id": id,
		}})
	require.Error(t, err)
	require.Contains(t, err.Error(), "path /mnt/dir is a directory, not a file")

	err = testutil.Query(
		`{
			container {
				from(address: "alpine:3.16.2") {
					withMountedTemp(path: "/mnt/tmp") {
						file(path: "/mnt/tmp/bogus") {
							id
						}
					}
				}
			}
		}`, nil, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "bogus: cannot retrieve path from tmpfs")

	cacheID := newCache(t)
	err = testutil.Query(
		`query Test($cache: CacheID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedCache(path: "/mnt/cache", cache: $cache) {
						file(path: "/mnt/cache/bogus") {
							id
```
```go
						}
					}
				}
			}
		}`, nil, &testutil.QueryOptions{Variables: map[string]any{
			"cache": cacheID,
		}})
	require.Error(t, err)
	require.Contains(t, err.Error(), "bogus: cannot retrieve path from cache")

	secretID := newSecret(t, "some-secret")
	err = testutil.Query(
		`query Test($secret: SecretID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedSecret(path: "/sekret", source: $secret) {
						file(path: "/sekret") {
							contents
						}
					}
				}
			}
		}`, nil, &testutil.QueryOptions{Variables: map[string]any{
			"secret": secretID,
		}})
	require.Error(t, err)
	require.Contains(t, err.Error(), "sekret: no such file or directory")
}

func TestContainerFSDirectory(t *testing.T) {
	t.Parallel()

	dirRes := struct {
```
```go
		Container struct {
			From struct {
				Directory struct {
					ID core.DirectoryID
				}
			}
		}
	}{}

	err := testutil.Query(
		`{
			container {
				from(address: "alpine:3.16.2") {
					directory(path: "/etc") {
						id
					}
				}
			}
		}`, &dirRes, nil)
	require.NoError(t, err)

	etcID := dirRes.Container.From.Directory.ID

	execRes := struct {
		Container struct {
			From struct {
				WithMountedDirectory struct {
					Exec struct {
```
```go
						Stdout struct {
							Contents string
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/etc", source: $id) {
						exec(args: ["cat", "/mnt/etc/alpine-release"]) {
							stdout {
								contents
							}
						}
					}
				}
			}
		}`, &execRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": etcID,
		}})
	require.NoError(t, err)

	require.Equal(t, "3.16.2\n", execRes.Container.From.WithMountedDirectory.Exec.Stdout.Contents)
}

func TestContainerRelativePaths(t *testing.T) {
	t.Parallel()

	dirRes := struct {
```
```go
		Directory struct {
			WithNewFile struct {
				ID core.DirectoryID
			}
		}
	}{}

	err := testutil.Query(
		`{
			directory {
				withNewFile(path: "some-file", contents: "some-content") {
					id
				}
			}
		}`, &dirRes, nil)
	require.NoError(t, err)

	id := dirRes.Directory.WithNewFile.ID

	writeRes := struct {
		Container struct {
			From struct {
				Exec struct {
					WithWorkdir struct {
						WithWorkdir struct {
							Workdir              string
							WithMountedDirectory struct {
								WithMountedTemp struct {
									WithMountedCache struct {
										Mounts []string
										Exec   struct {
```
```go
											Directory struct {
												ID core.DirectoryID
											}
										}
										WithoutMount struct {
											Mounts []string
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}{}

	cacheID := newCache(t)
	err = testutil.Query(
		`query Test($id: DirectoryID!, $cache: CacheID!) {
			container {
				from(address: "alpine:3.16.2") {
					exec(args: ["mkdir", "-p", "/mnt/sub"]) {
						withWorkdir(path: "/mnt") {
							withWorkdir(path: "sub") {
								workdir
								withMountedDirectory(path: "dir", source: $id) {
									withMountedTemp(path: "tmp") {
										withMountedCache(path: "cache", cache: $cache) {
											mounts
											exec(args: ["touch", "dir/another-file"]) {
```
directory(path: "dir") { id } } withoutMount(path: "cache") { mounts } } } } } } } } } }`, &writeRes, &testutil.QueryOptions{Variables: map[string]any{ "id": id, "cache": cacheID, }}) require.NoError(t, err) require.Equal(t, []string{"/mnt/sub/dir", "/mnt/sub/tmp", "/mnt/sub/cache"}, writeRes.Container.From.Exec.WithWorkdir.WithWorkdir.WithMountedDirectory.WithMountedTemp.WithMountedCache.Mounts) require.Equal(t, []string{"/mnt/sub/dir", "/mnt/sub/tmp"}, writeRes.Container.From.Exec.WithWorkdir.WithWorkdir.WithMountedDirectory.WithMountedTemp.WithMountedCache.WithoutMount.Mounts) writtenID := writeRes.Container.From.Exec.WithWorkdir.WithWorkdir.WithMountedDirectory.WithMountedTemp.WithMountedCache.Exec.Directory.ID execRes := struct { Container struct { From struct {
```go
				WithMountedDirectory struct {
					Exec struct {
						Stdout struct {
							Contents string
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "alpine:3.16.2") {
					withMountedDirectory(path: "/mnt/dir", source: $id) {
						exec(args: ["ls", "/mnt/dir"]) {
							stdout {
								contents
							}
						}
					}
				}
			}
		}`, &execRes, &testutil.QueryOptions{Variables: map[string]any{
			"id": writtenID,
		}})
	require.NoError(t, err)

	require.Equal(t, "another-file\nsome-file\n", execRes.Container.From.WithMountedDirectory.Exec.Stdout.Contents)
}

func TestContainerMultiFrom(t *testing.T) {
```
```go
	t.Parallel()

	dirRes := struct {
		Directory struct {
			ID core.DirectoryID
		}
	}{}

	err := testutil.Query(
		`{
			directory {
				id
			}
		}`, &dirRes, nil)
	require.NoError(t, err)

	id := dirRes.Directory.ID

	execRes := struct {
```
```go
		Container struct {
			From struct {
				WithMountedDirectory struct {
					Exec struct {
						From struct {
							Exec struct {
								Exec struct {
									Stdout struct {
										Contents string
									}
								}
							}
						}
					}
				}
			}
		}
	}{}

	err = testutil.Query(
		`query Test($id: DirectoryID!) {
			container {
				from(address: "node:18.10.0-alpine") {
					withMountedDirectory(path: "/mnt", source: $id) {
						exec(args: ["sh", "-c", "node --version >> /mnt/versions"]) {
```
from(address: "golang:1.18.2-alpine") { exec(args: ["sh", "-c", "go version >> /mnt/versions"]) { exec(args: ["cat", "/mnt/versions"]) { stdout { contents } } } } } } } } }`, &execRes, &testutil.QueryOptions{Variables: map[string]any{ "id": id, }}) require.NoError(t, err) require.Contains(t, execRes.Container.From.WithMountedDirectory.Exec.From.Exec.Exec.Stdout.Contents, "v18.10.0\n") require.Contains(t, execRes.Container.From.WithMountedDirectory.Exec.From.Exec.Exec.Stdout.Contents, "go version go1.18.2") } func TestContainerPublish(t *testing.T) { ctx := context.Background() c, err := dagger.Connect(ctx) require.NoError(t, err) defer c.Close() randomID := identity.NewID() go func() {
```go
		_, err := c.Container().
			From("registry:2").
			WithEnvVariable("RANDOM", randomID).
			Exec().
			ExitCode(ctx)
		if err != nil {
			t.Logf("error running registry: %v", err)
		}
	}()

	_, err = c.Container().
		From("alpine:3.16.2").
		WithEnvVariable("RANDOM", randomID).
		Exec(dagger.ContainerExecOpts{
			Args: []string{"sh", "-c", "for i in $(seq 1 60); do nc -zv 127.0.0.1 5000 && exit 0; sleep 1; done; exit 1"},
		}).
		ExitCode(ctx)
	require.NoError(t, err)

	testRef := "127.0.0.1:5000/testimagepush:latest"
	pushedRef, err := c.Container().
		From("alpine:3.16.2").
		Publish(ctx, testRef)
	require.NoError(t, err)
	require.NotEqual(t, testRef, pushedRef)
	require.Contains(t, pushedRef, "@sha256:")

	contents, err := c.Container().
		From(pushedRef).FS().File("/etc/alpine-release").Contents(ctx)
	require.NoError(t, err)
	require.Equal(t, contents, "3.16.2\n")
}

func TestContainerMultipleMounts(t *testing.T) {
```
```go
	c, ctx := connect(t)
	defer c.Close()

	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "one"), []byte("1"), 0600))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "two"), []byte("2"), 0600))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "three"), []byte("3"), 0600))

	one := c.Host().Directory(dir).File("one")
	two := c.Host().Directory(dir).File("two")
	three := c.Host().Directory(dir).File("three")

	build := c.Container().From("alpine:3.16.2").
		WithMountedFile("/example/one", one).
		WithMountedFile("/example/two", two).
		WithMountedFile("/example/three", three)

	build = build.Exec(dagger.ContainerExecOpts{
		Args: []string{"ls", "/example/one", "/example/two", "/example/three"},
	})

	build = build.Exec(dagger.ContainerExecOpts{
		Args: []string{"cat", "/example/one", "/example/two", "/example/three"},
	})

	out, err := build.Stdout().Contents(ctx)
	require.NoError(t, err)
	require.Equal(t, "123", out)
}

func TestContainerExport(t *testing.T) {
	t.Parallel()

	ctx := context.Background()

	wd := t.TempDir()
	dest := t.TempDir()

	c, err := dagger.Connect(ctx, dagger.WithWorkdir(wd))
	require.NoError(t, err)
```
```go
	defer c.Close()

	ctr := c.Container().From("alpine:3.16.2")

	t.Run("to absolute dir", func(t *testing.T) {
		imagePath := filepath.Join(dest, "image.tar")

		ok, err := ctr.Export(ctx, imagePath)
		require.NoError(t, err)
		require.True(t, ok)

		entries := tarEntries(t, imagePath)
		require.Contains(t, entries, "oci-layout")
		require.Contains(t, entries, "index.json")
		require.Contains(t, entries, "manifest.json")
	})

	t.Run("to workdir", func(t *testing.T) {
		ok, err := ctr.Export(ctx, "./image.tar")
		require.NoError(t, err)
		require.True(t, ok)

		entries := tarEntries(t, filepath.Join(wd, "image.tar"))
		require.Contains(t, entries, "oci-layout")
		require.Contains(t, entries, "index.json")
		require.Contains(t, entries, "manifest.json")
	})

	t.Run("to outer dir", func(t *testing.T) {
		ok, err := ctr.Export(ctx, "../")
		require.Error(t, err)
		require.False(t, ok)
	})
}

func TestContainerMultiPlatformExport(t *testing.T) {
```
```go
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, err := dagger.Connect(ctx, dagger.WithLogOutput(os.Stdout))
	require.NoError(t, err)
	defer c.Close()

	startRegistry(ctx, c, t)

	variants := make([]*dagger.Container, 0, len(platformToUname))
	for platform := range platformToUname {
		ctr := c.Container(dagger.ContainerOpts{Platform: platform}).
			From("alpine:3.16.2").
			Exec(dagger.ContainerExecOpts{
				Args: []string{"uname", "-m"},
			})

		variants = append(variants, ctr)
	}

	dest := filepath.Join(t.TempDir(), "image.tar")

	ok, err := c.Container().Export(ctx, dest, dagger.ContainerExportOpts{
		PlatformVariants: variants,
	})
	require.NoError(t, err)
	require.True(t, ok)

	entries := tarEntries(t, dest)
	require.Contains(t, entries, "oci-layout")
	require.Contains(t, entries, "index.json")
	require.NotContains(t, entries, "manifest.json")
}
```
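All of the test chunks above exec on containers built with From(), so the state always carries a platform. The scratch case described in the issue body, written with the same SDK calls these tests use, might look like the following sketch; `execFromScratch`, `tool`, and the mount path are illustrative only, not part of the repo:

```go
package example

import (
	"context"

	"dagger.io/dagger"
)

// execFromScratch mirrors the scratch scenario from the issue body using
// the same SDK calls as the tests above: no From(), so the container has
// no platform-specific base, which is exactly the case that broke the
// shim build.
func execFromScratch(ctx context.Context, c *dagger.Client, tool *dagger.File) (string, error) {
	return c.Container().
		WithMountedFile("/bin/tool", tool).
		Exec(dagger.ContainerExecOpts{
			Args: []string{"/bin/tool"},
		}).
		Stdout().
		Contents(ctx)
}
```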
updated_file: core/schema/container.go (the remaining chunk_content rows are slices of this file; all other fields are unchanged):
package schema

import (
	"fmt"
	"path"
	"strings"

	"github.com/dagger/dagger/core"
	"github.com/dagger/dagger/router"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

type containerSchema struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	*baseSchema

	host *core.Host
}

var _ router.ExecutableSchema = &containerSchema{}

func (s *containerSchema) Name() string {
	return "container"
}

func (s *containerSchema) Schema() string {
	return Container
}

func (s *containerSchema) Resolvers() router.Resolvers {
	return router.Resolvers{
		"ContainerID": stringResolver(core.ContainerID("")),
		"Query": router.ObjectResolver{
			"container": router.ToResolver(s.container),
		},
		"Container": router.ObjectResolver{
			"from":        router.ToResolver(s.from),
			"build":       router.ToResolver(s.build),
			"fs":          router.ToResolver(s.fs),
			"withFS":      router.ToResolver(s.withFS),
			"file":        router.ToResolver(s.file),
			"directory":   router.ToResolver(s.directory),
			"user":        router.ToResolver(s.user),
			"withUser":    router.ToResolver(s.withUser),
			"workdir":     router.ToResolver(s.workdir),
			"withWorkdir": router.ToResolver(s.withWorkdir),
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
"envVariables": router.ToResolver(s.envVariables), "envVariable": router.ToResolver(s.envVariable), "withEnvVariable": router.ToResolver(s.withEnvVariable), "withSecretVariable": router.ToResolver(s.withSecretVariable), "withoutEnvVariable": router.ToResolver(s.withoutEnvVariable), "entrypoint": router.ToResolver(s.entrypoint), "withEntrypoint": router.ToResolver(s.withEntrypoint), "defaultArgs": router.ToResolver(s.defaultArgs), "withDefaultArgs": router.ToResolver(s.withDefaultArgs), "mounts": router.ToResolver(s.mounts), "withMountedDirectory": router.ToResolver(s.withMountedDirectory), "withMountedFile": router.ToResolver(s.withMountedFile), "withMountedTemp": router.ToResolver(s.withMountedTemp), "withMountedCache": router.ToResolver(s.withMountedCache), "withMountedSecret": router.ToResolver(s.withMountedSecret), "withoutMount": router.ToResolver(s.withoutMount), "exec": router.ToResolver(s.exec), "exitCode": router.ToResolver(s.exitCode), "stdout": router.ToResolver(s.stdout), "stderr": router.ToResolver(s.stderr), "publish": router.ToResolver(s.publish), "platform": router.ToResolver(s.platform), "export": router.ToResolver(s.export), }, } } func (s *containerSchema) Dependencies() []router.ExecutableSchema { return nil } type containerArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	ID       core.ContainerID
	Platform *specs.Platform
}

func (s *containerSchema) container(ctx *router.Context, parent any, args containerArgs) (*core.Container, error) {
	platform := s.baseSchema.platform
	if args.Platform != nil {
		if args.ID != "" {
			return nil, fmt.Errorf("cannot specify both existing container ID and platform")
		}
		platform = *args.Platform
	}

	return core.NewContainer(args.ID, platform)
}

type containerFromArgs struct {
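The error in issue 3393 came from trying to parse a platform that was never set ("unknown") while building the shim. The resolver above sidesteps one half of that by always defaulting to the schema's own platform. A minimal standalone sketch of the same defaulting pattern using containerd's platforms package (this is an illustration, not the exact dagger fix):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// resolvePlatform falls back to the host platform instead of ever
// parsing a sentinel like "unknown". Sketch only.
func resolvePlatform(requested string) (specs.Platform, error) {
	if requested == "" || requested == "unknown" {
		return platforms.DefaultSpec(), nil
	}
	return platforms.Parse(requested)
}

func main() {
	p, err := resolvePlatform("")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // e.g. "linux/amd64"
}
```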
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Address string
}

func (s *containerSchema) from(ctx *router.Context, parent *core.Container, args containerFromArgs) (*core.Container, error) {
	return parent.From(ctx, s.gw, args.Address)
}

type containerBuildArgs struct {
	Context    core.DirectoryID
	Dockerfile string
}

func (s *containerSchema) build(ctx *router.Context, parent *core.Container, args containerBuildArgs) (*core.Container, error) {
	return parent.Build(ctx, s.gw, &core.Directory{ID: args.Context}, args.Dockerfile)
}

func (s *containerSchema) withFS(ctx *router.Context, parent *core.Container, arg core.Directory) (*core.Container, error) {
	ctr, err := parent.WithFS(ctx, &arg)
	if err != nil {
		return nil, err
	}

	return ctr, nil
}

func (s *containerSchema) fs(ctx *router.Context, parent *core.Container, args any) (*core.Directory, error) {
	return parent.FS(ctx)
}

type containerExecArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	core.ContainerExecOpts
}

func (s *containerSchema) exec(ctx *router.Context, parent *core.Container, args containerExecArgs) (*core.Container, error) {
	return parent.Exec(ctx, s.gw, args.ContainerExecOpts)
}

func (s *containerSchema) exitCode(ctx *router.Context, parent *core.Container, args any) (*int, error) {
	return parent.ExitCode(ctx, s.gw)
}

func (s *containerSchema) stdout(ctx *router.Context, parent *core.Container, args any) (*core.File, error) {
	return parent.MetaFile(ctx, s.gw, "stdout")
}

func (s *containerSchema) stderr(ctx *router.Context, parent *core.Container, args any) (*core.File, error) {
	return parent.MetaFile(ctx, s.gw, "stderr")
}

type containerWithEntrypointArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Args []string
}

func (s *containerSchema) withEntrypoint(ctx *router.Context, parent *core.Container, args containerWithEntrypointArgs) (*core.Container, error) {
	return parent.UpdateImageConfig(ctx, func(cfg specs.ImageConfig) specs.ImageConfig {
		cfg.Entrypoint = args.Args
		return cfg
	})
}

func (s *containerSchema) entrypoint(ctx *router.Context, parent *core.Container, args containerWithVariableArgs) ([]string, error) {
	cfg, err := parent.ImageConfig(ctx)
	if err != nil {
		return nil, err
	}

	return cfg.Entrypoint, nil
}

type containerWithDefaultArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Args *[]string
}

func (s *containerSchema) withDefaultArgs(ctx *router.Context, parent *core.Container, args containerWithDefaultArgs) (*core.Container, error) {
	return parent.UpdateImageConfig(ctx, func(cfg specs.ImageConfig) specs.ImageConfig {
		if args.Args == nil {
			cfg.Cmd = []string{}
			return cfg
		}

		cfg.Cmd = *args.Args
		return cfg
	})
}

func (s *containerSchema) defaultArgs(ctx *router.Context, parent *core.Container, args any) ([]string, error) {
	cfg, err := parent.ImageConfig(ctx)
	if err != nil {
		return nil, err
	}

	return cfg.Cmd, nil
}

type containerWithUserArgs struct {
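Note the pointer-as-tristate convention above: a nil Args clears CMD, while a non-nil slice (even an empty one) replaces it outright. The same pattern in a self-contained form:

```go
package main

import "fmt"

// setCmd mirrors withDefaultArgs: nil means "clear", a non-nil slice
// means "set to exactly this".
func setCmd(args *[]string) []string {
	if args == nil {
		return []string{}
	}
	return *args
}

func main() {
	fmt.Println(setCmd(nil))                 // []
	fmt.Println(setCmd(&[]string{}))         // []
	fmt.Println(setCmd(&[]string{"dagger"})) // [dagger]
}
```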
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Name string
}

func (s *containerSchema) withUser(ctx *router.Context, parent *core.Container, args containerWithUserArgs) (*core.Container, error) {
	return parent.UpdateImageConfig(ctx, func(cfg specs.ImageConfig) specs.ImageConfig {
		cfg.User = args.Name
		return cfg
	})
}

func (s *containerSchema) user(ctx *router.Context, parent *core.Container, args containerWithVariableArgs) (string, error) {
	cfg, err := parent.ImageConfig(ctx)
	if err != nil {
		return "", err
	}

	return cfg.User, nil
}

type containerWithWorkdirArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Path string
}

func (s *containerSchema) withWorkdir(ctx *router.Context, parent *core.Container, args containerWithWorkdirArgs) (*core.Container, error) {
	return parent.UpdateImageConfig(ctx, func(cfg specs.ImageConfig) specs.ImageConfig {
		cfg.WorkingDir = absPath(cfg.WorkingDir, args.Path)
		return cfg
	})
}

func (s *containerSchema) workdir(ctx *router.Context, parent *core.Container, args containerWithVariableArgs) (string, error) {
	cfg, err := parent.ImageConfig(ctx)
	if err != nil {
		return "", err
	}

	return cfg.WorkingDir, nil
}

type containerWithVariableArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Name  string
	Value string
}

func (s *containerSchema) withEnvVariable(ctx *router.Context, parent *core.Container, args containerWithVariableArgs) (*core.Container, error) {
	return parent.UpdateImageConfig(ctx, func(cfg specs.ImageConfig) specs.ImageConfig {
		newEnv := []string{}
		prefix := args.Name + "="
		for _, env := range cfg.Env {
			if !strings.HasPrefix(env, prefix) {
				newEnv = append(newEnv, env)
			}
		}

		newEnv = append(newEnv, fmt.Sprintf("%s=%s", args.Name, args.Value))

		cfg.Env = newEnv
		return cfg
	})
}

type containerWithoutVariableArgs struct {
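withEnvVariable deduplicates by dropping any existing NAME= entry before appending the new one, so the env slice never accumulates stale values. The same filter-then-append idiom in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// setEnv replaces any existing NAME=... entry with a single new one,
// preserving the order of the other variables.
func setEnv(env []string, name, value string) []string {
	out := make([]string, 0, len(env)+1)
	prefix := name + "="
	for _, kv := range env {
		if !strings.HasPrefix(kv, prefix) {
			out = append(out, kv)
		}
	}
	return append(out, prefix+value)
}

func main() {
	env := []string{"PATH=/usr/bin", "FOO=old"}
	fmt.Println(setEnv(env, "FOO", "new")) // [PATH=/usr/bin FOO=new]
}
```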
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Name string
}

func (s *containerSchema) withoutEnvVariable(ctx *router.Context, parent *core.Container, args containerWithoutVariableArgs) (*core.Container, error) {
	return parent.UpdateImageConfig(ctx, func(cfg specs.ImageConfig) specs.ImageConfig {
		removedEnv := []string{}
		prefix := args.Name + "="
		for _, env := range cfg.Env {
			if !strings.HasPrefix(env, prefix) {
				removedEnv = append(removedEnv, env)
			}
		}

		cfg.Env = removedEnv
		return cfg
	})
}

type EnvVariable struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Name  string `json:"name"`
	Value string `json:"value"`
}

func (s *containerSchema) envVariables(ctx *router.Context, parent *core.Container, args any) ([]EnvVariable, error) {
	cfg, err := parent.ImageConfig(ctx)
	if err != nil {
		return nil, err
	}

	vars := make([]EnvVariable, 0, len(cfg.Env))
	for _, v := range cfg.Env {
		name, value, _ := strings.Cut(v, "=")
		e := EnvVariable{
			Name:  name,
			Value: value,
		}
		vars = append(vars, e)
	}

	return vars, nil
}

type containerVariableArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Name string
}

func (s *containerSchema) envVariable(ctx *router.Context, parent *core.Container, args containerVariableArgs) (*string, error) {
	cfg, err := parent.ImageConfig(ctx)
	if err != nil {
		return nil, err
	}

	for _, env := range cfg.Env {
		name, val, ok := strings.Cut(env, "=")
		if ok && name == args.Name {
			return &val, nil
		}
	}

	return nil, nil
}

type containerWithMountedDirectoryArgs struct {
	Path   string
	Source core.DirectoryID
}

func (s *containerSchema) withMountedDirectory(ctx *router.Context, parent *core.Container, args containerWithMountedDirectoryArgs) (*core.Container, error) {
	return parent.WithMountedDirectory(ctx, args.Path, &core.Directory{ID: args.Source})
}

type containerPublishArgs struct {
	Address          string
	PlatformVariants []core.ContainerID
}

func (s *containerSchema) publish(ctx *router.Context, parent *core.Container, args containerPublishArgs) (string, error) {
	return parent.Publish(ctx, args.Address, args.PlatformVariants, s.bkClient, s.solveOpts, s.solveCh)
}

type containerWithMountedFileArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Path   string
	Source core.FileID
}

func (s *containerSchema) withMountedFile(ctx *router.Context, parent *core.Container, args containerWithMountedFileArgs) (*core.Container, error) {
	return parent.WithMountedFile(ctx, args.Path, &core.File{ID: args.Source})
}

type containerWithMountedCacheArgs struct {
	Path   string
	Cache  core.CacheID
	Source core.DirectoryID
}

func (s *containerSchema) withMountedCache(ctx *router.Context, parent *core.Container, args containerWithMountedCacheArgs) (*core.Container, error) {
	var dir *core.Directory
	if args.Source != "" {
		dir = &core.Directory{ID: args.Source}
	}

	return parent.WithMountedCache(ctx, args.Path, args.Cache, dir)
}

type containerWithMountedTempArgs struct {
	Path string
}

func (s *containerSchema) withMountedTemp(ctx *router.Context, parent *core.Container, args containerWithMountedTempArgs) (*core.Container, error) {
	return parent.WithMountedTemp(ctx, args.Path)
}

type containerWithoutMountArgs struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Path string
}

func (s *containerSchema) withoutMount(ctx *router.Context, parent *core.Container, args containerWithoutMountArgs) (*core.Container, error) {
	return parent.WithoutMount(ctx, args.Path)
}

func (s *containerSchema) mounts(ctx *router.Context, parent *core.Container, _ any) ([]string, error) {
	return parent.Mounts(ctx)
}

type containerDirectoryArgs struct {
	Path string
}

func (s *containerSchema) directory(ctx *router.Context, parent *core.Container, args containerDirectoryArgs) (*core.Directory, error) {
	return parent.Directory(ctx, s.gw, args.Path)
}

type containerFileArgs struct {
	Path string
}

func (s *containerSchema) file(ctx *router.Context, parent *core.Container, args containerFileArgs) (*core.File, error) {
	return parent.File(ctx, s.gw, args.Path)
}

func absPath(workDir string, containerPath string) string {
	if path.IsAbs(containerPath) {
		return containerPath
	}

	if workDir == "" {
		workDir = "/"
	}

	return path.Join(workDir, containerPath)
}

type containerWithSecretVariableArgs struct {
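absPath resolves container paths the way a shell resolves them against the current working directory: absolute paths win, relative ones are joined, and an empty workdir defaults to the root. A quick demonstration of its behavior (logic copied from the helper above for illustration):

```go
package main

import (
	"fmt"
	"path"
)

// Same logic as the schema's absPath helper, reproduced for demonstration.
func absPath(workDir, containerPath string) string {
	if path.IsAbs(containerPath) {
		return containerPath
	}
	if workDir == "" {
		workDir = "/"
	}
	return path.Join(workDir, containerPath)
}

func main() {
	fmt.Println(absPath("/app", "src"))  // /app/src
	fmt.Println(absPath("/app", "/etc")) // /etc (absolute wins)
	fmt.Println(absPath("", "src"))      // /src (empty workdir defaults to /)
}
```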
closed
dagger/dagger
https://github.com/dagger/dagger
3,393
Fix build of shim when execing on non-platform specific llb state
from @marcosnils ``` query Build($file: FileID!, $ref: ContainerAddress!) { container { withMountedFile(path: "/mnt/dagger", source: $file) { exec(args: ["cp", "/mnt/dagger", "/dagger"]) { withEntrypoint(args: ["dagger"]) { publish(address: $ref) } } } } } ``` results in ``` { "data": null, "errors": [ { "message": "build shim: failed to parse target platform unknown: \"unknown\": unknown operating system or architecture: invalid argument", "locations": [ { "line": 4, "column": 7 } ], "path": [ "container", "withMountedFile", "exec" ] } ] } ``` The above query should fail since there's no fs on which to execute `cp` but it shouldn't fail by not being able to build the shim. It's also should be possible in general to, e.g. start at scratch, unpack a static binary and exec it, in which case this shouldn't even be an error case at all.
https://github.com/dagger/dagger/issues/3393
https://github.com/dagger/dagger/pull/3399
51c7941c3c8e7c2ac4242f5c41cec5a56d17e9d3
f8b3c63637cfcca2ec677fea2e020885664bd301
2022-10-14T22:29:08Z
go
2022-11-10T06:01:37Z
core/schema/container.go
	Name   string
	Secret core.SecretID
}

func (s *containerSchema) withSecretVariable(ctx *router.Context, parent *core.Container, args containerWithSecretVariableArgs) (*core.Container, error) {
	return parent.WithSecretVariable(ctx, args.Name, &core.Secret{ID: args.Secret})
}

type containerWithMountedSecretArgs struct {
	Path   string
	Source core.SecretID
}

func (s *containerSchema) withMountedSecret(ctx *router.Context, parent *core.Container, args containerWithMountedSecretArgs) (*core.Container, error) {
	return parent.WithMountedSecret(ctx, args.Path, core.NewSecret(args.Source))
}

func (s *containerSchema) platform(ctx *router.Context, parent *core.Container, args any) (specs.Platform, error) {
	return parent.Platform()
}

type containerExportArgs struct {
	Path             string
	PlatformVariants []core.ContainerID
}

func (s *containerSchema) export(ctx *router.Context, parent *core.Container, args containerExportArgs) (bool, error) {
	if err := parent.Export(ctx, s.host, args.Path, args.PlatformVariants, s.bkClient, s.solveOpts, s.solveCh); err != nil {
		return false, err
	}

	return true, nil
}
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/all.go
package sdk

import (
	"context"
	"errors"
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/all.go
"fmt" "os" "github.com/hexops/gotextdiff" "github.com/hexops/gotextdiff/myers" "github.com/hexops/gotextdiff/span" "github.com/magefile/mage/mg" "golang.org/x/sync/errgroup" ) type SDK interface { Lint(ctx context.Context) error Test(ctx context.Context) error Generate(ctx context.Context) error Publish(ctx context.Context, tag string) error Bump(ctx context.Context, engineVersion string) error } var availableSDKs = []SDK{ &Go{}, &Python{}, &NodeJS{}, } var _ SDK = All{} type All mg.Namespace func (t All) Lint(ctx context.Context) error { return t.runAll(func(s SDK) any { return s.Lint }) } func (t All) Test(ctx context.Context) error { return t.runAll(func(s SDK) any { return s.Test
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/all.go
	})
}

func (t All) Generate(ctx context.Context) error {
	return t.runAll(func(s SDK) any {
		return s.Generate
	})
}

func (t All) Publish(ctx context.Context, version string) error {
	return errors.New("publish is not supported on `all` target. Please run this command on individual SDKs")
}

func (t All) Bump(ctx context.Context, engineVersion string) error {
	eg, gctx := errgroup.WithContext(ctx)
	for _, sdk := range availableSDKs {
		sdk := sdk
		eg.Go(func() error {
			return sdk.Bump(gctx, engineVersion)
		})
	}

	return eg.Wait()
}

func (t All) runAll(fn func(SDK) any) error {
	handlers := []any{}
	for _, sdk := range availableSDKs {
		handlers = append(handlers, fn(sdk))
	}
	mg.Deps(handlers...)
	return nil
}

func lintGeneratedCode(fn func() error, files ...string) error {
	originals := map[string][]byte{}
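Bump fans out across the SDKs with errgroup, re-binding the loop variable so each goroutine captures its own sdk (required before Go 1.22's per-iteration loop variables). The same pattern in a self-contained form:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	items := []string{"go", "python", "nodejs"}

	eg, ctx := errgroup.WithContext(context.Background())
	for _, item := range items {
		item := item // capture a per-iteration copy (pre-Go 1.22)
		eg.Go(func() error {
			_ = ctx // a real task would respect cancellation here
			fmt.Println("bumping", item)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		panic(err)
	}
}
```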
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/all.go
	for _, f := range files {
		content, err := os.ReadFile(f)
		if err != nil {
			return err
		}

		originals[f] = content
	}

	defer func() {
		for _, f := range files {
			defer os.WriteFile(f, originals[f], 0600)
		}
	}()

	if err := fn(); err != nil {
		return err
	}

	for _, f := range files {
		original := string(originals[f])

		updated, err := os.ReadFile(f)
		if err != nil {
			return err
		}

		if original != string(updated) {
			edits := myers.ComputeEdits(span.URIFromPath(f), original, string(updated))
			diff := fmt.Sprint(gotextdiff.ToUnified(f, f, original, edits))

			return fmt.Errorf("generated api mismatch. please run `mage sdk:all:generate`:\n%s", diff)
		}
	}

	return nil
}
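The helper's core trick is snapshot, regenerate, compare, restore. A self-contained miniature of the same flow (no diff library; the file name is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

func main() {
	const f = "client.gen.txt" // illustrative target file
	_ = os.WriteFile(f, []byte("v1\n"), 0o600)

	before, _ := os.ReadFile(f)
	defer os.WriteFile(f, before, 0o600) // restore the snapshot no matter what

	// "regenerate" the file; drift here means the committed copy is stale
	_ = os.WriteFile(f, []byte("v2\n"), 0o600)

	after, _ := os.ReadFile(f)
	if !bytes.Equal(before, after) {
		fmt.Println("generated code is out of date; re-run the generator")
	}
}
```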
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/nodejs.go
package sdk

import (
	"context"
	"fmt"
	"os"

	"dagger.io/dagger"
	"github.com/dagger/dagger/internal/mage/util"
	"github.com/magefile/mage/mg"
)

var (
	nodejsGeneratedAPIPath = "sdk/nodejs/api/client.gen.ts"
)

var _ SDK = NodeJS{}

type NodeJS mg.Namespace

func (t NodeJS) Lint(ctx context.Context) error {
	c, err := dagger.Connect(ctx, dagger.WithLogOutput(os.Stderr))
	if err != nil {
		return err
	}
	defer c.Close()

	return util.WithDevEngine(ctx, c, func(ctx context.Context, c *dagger.Client) error {
		_, err = nodeJSBase(c).
			Exec(dagger.ContainerExecOpts{
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/nodejs.go
Args: []string{"yarn", "lint"}, ExperimentalPrivilegedNesting: true, }). WithWorkdir("/app"). ExitCode(ctx) return err }) } func (t NodeJS) Generateandcheck(ctx context.Context) error { return lintGeneratedCode(func() error { return t.Generate(ctx) }, nodejsGeneratedAPIPath) } func (t NodeJS) Test(ctx context.Context) error { c, err := dagger.Connect(ctx, dagger.WithLogOutput(os.Stderr)) if err != nil { return err } defer c.Close() return util.WithDevEngine(ctx, c, func(ctx context.Context, c *dagger.Client) error { _, err = nodeJSBase(c). Exec(dagger.ContainerExecOpts{ Args: []string{"yarn", "run", "test"}, ExperimentalPrivilegedNesting: true, }). ExitCode(ctx) return err }) } func (t NodeJS) Generate(ctx context.Context) error {
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/nodejs.go
	c, err := dagger.Connect(ctx, dagger.WithLogOutput(os.Stderr))
	if err != nil {
		return err
	}
	defer c.Close()

	return util.WithDevEngine(ctx, c, func(ctx context.Context, c *dagger.Client) error {
		generated, err := util.GoBase(c).
			WithMountedFile("/usr/local/bin/cloak", util.DaggerBinary(c)).
			Exec(dagger.ContainerExecOpts{
				Args:                          []string{"cloak", "client-gen", "--lang", "nodejs", "-o", nodejsGeneratedAPIPath},
				ExperimentalPrivilegedNesting: true,
			}).
			File(nodejsGeneratedAPIPath).
			Contents(ctx)
		if err != nil {
			return err
		}

		return os.WriteFile(nodejsGeneratedAPIPath, []byte(generated), 0o600)
	})
}

func (t NodeJS) Publish(ctx context.Context, tag string) error {
	panic("FIXME")
}

func (t NodeJS) Bump(ctx context.Context, version string) error {
	engineReference := fmt.Sprintf("// Code generated by dagger. DO NOT EDIT.\n"+
		"const DEFAULT_IMAGE_REF = %q;\n\n"+
		"export const DEFAULT_HOST = `docker-image://${DEFAULT_IMAGE_REF}`\n", version)

	return os.WriteFile("sdk/nodejs/provisioning/default.ts", []byte(engineReference), 0600)
}

func nodeJSBase(c *dagger.Client) *dagger.Container {
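Bump writes a tiny generated TypeScript module that pins the default engine image for the NodeJS SDK. A quick sketch of what it emits for a given version, printing instead of writing the file (the image ref here is illustrative):

```go
package main

import "fmt"

func main() {
	version := "registry.dagger.io/engine:v0.3.0" // illustrative engine ref
	out := fmt.Sprintf("// Code generated by dagger. DO NOT EDIT.\n"+
		"const DEFAULT_IMAGE_REF = %q;\n\n"+
		"export const DEFAULT_HOST = `docker-image://${DEFAULT_IMAGE_REF}`\n", version)
	fmt.Print(out)
}
```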
closed
dagger/dagger
https://github.com/dagger/dagger
3,719
NodeJS SDK : Publish NodeJS SDK to npm
Publishes the NodeJS SDK to the registry so that it can be installed by name. ex: `npm publish @dagger.io/nodejs-sdk` https://www.npmjs.com is the npm Registry that has a public collection of packages of open-source code for Node.js. In order to publish, follow the developer guide: https://docs.npmjs.com/cli/v9/using-npm/developers#the-packagejson-file Need to discuss everything related to the CI with github action (Test, build and publish to NPM). cc @gerhard Tasks: - [x] add a README.md - [x] LICENSE Npm automatically handles node_modules ignore. Do not add node_modules to .npmignore.
https://github.com/dagger/dagger/issues/3719
https://github.com/dagger/dagger/pull/3809
88e795da2c1b78e4b2b79f542a3233c57dd1fbed
dea51153c37566831d1075479f708d7d51bbcf5d
2022-11-07T20:47:22Z
go
2022-11-15T20:39:32Z
internal/mage/sdk/nodejs.go
	src := c.Directory().WithDirectory("/", util.Repository(c).Directory("sdk/nodejs"))

	base := c.Container().
		// ⚠️ Keep this in sync with the engine version defined in package.json
		From("node:16-alpine")

	return base.
		WithMountedDirectory("/app", src).
		WithWorkdir("/app").
		Exec(dagger.ContainerExecOpts{
			Args: []string{"yarn", "install"},
		})
}
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
We build our shim binary on the fly, which requires we pull an image. We currently pull from dockerhub, which can result in 429 throttling errors, which is surprising to users since they never even requested that image be pulled. Two options: 1. Pull the image from a different registry that doesn't have the same ratelimiting 2. Don't build the shim like that anymore; maybe pre-build it and include it in our engine image now that that's an option Current preference is 2. That approach would: 1. save us from managing another image. 2. make it easier for users to customize the engine image in the future or to mirror it on their own registries (i.e. an internal company one) 3. be more performant (pull everything at once) --- Option 2 is easier said than done unfortunately. There's not really a built-in way to just mount a binary from the **buildkitd's host filesystem** into arbitrary execs. Execs are supposed to be fully specified by LLB and LLB alone, and LLB can only include references to the **client's** host filesystem (which is not the same buildkitd's). There are bunch of possible approaches, but there's one that sticks out to me as being not that hacky and also *relatively* easy: hook into the runc executor. Right now, our buildkitd uses `runc` to start containers. We can use oci runtime hooks to modify the spec of those containers to include a mount of our shim binary into each container. * I know of [this existing tool](https://github.com/awslabs/oci-add-hooks) for accomplishing this pretty easily, but it's also not even that hard to roll our own if a compelling need arises. In the future if we decide to switch to the containerd backend then that same approach will probably still work because the default containerd runtime uses runc. But at that point we could also have our own runtime (containerd coincidentally also uses the terminology "shim"...) too, so there will be lots of options. Pros: 1. Fairly easy 2. Robust enough - just reuses existing well defined mechanisms for hooking into container runtimes 3. Doesn't require any upstream changes to buildkitd afaict 4. A nice re-usable mechanism in the future in case we ever need to mount anything else from the buildkitd host direct into the container Cons: 1. The shim binary will no longer be part of LLB and thus won't count towards the cache key. Changing the shim won't invalidate cache * Actually, as I write this, that might just make sense... the shim is a component of the runner, so just like changing buildkitd doesn't invalidate cache, neither should the shim? Sort of a gray area, but this isn't actually an obvious con I suppose.
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
core/container.go
package core

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/containerd/containerd/platforms"
	"github.com/dagger/dagger/core/shim"
	"github.com/docker/distribution/reference"
	bkclient "github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	dockerfilebuilder "github.com/moby/buildkit/frontend/dockerfile/builder"
	bkgw "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/solver/pb"
	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

const (
	DaggerSockName = "dagger-sock"
	DaggerSockPath = "/dagger.sock"
)

type Container struct {
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
We build our shim binary on the fly, which requires we pull an image. We currently pull from dockerhub, which can result in 429 throttling errors, which is surprising to users since they never even requested that image be pulled. Two options: 1. Pull the image from a different registry that doesn't have the same ratelimiting 2. Don't build the shim like that anymore; maybe pre-build it and include it in our engine image now that that's an option Current preference is 2. That approach would: 1. save us from managing another image. 2. make it easier for users to customize the engine image in the future or to mirror it on their own registries (i.e. an internal company one) 3. be more performant (pull everything at once) --- Option 2 is easier said than done unfortunately. There's not really a built-in way to just mount a binary from the **buildkitd's host filesystem** into arbitrary execs. Execs are supposed to be fully specified by LLB and LLB alone, and LLB can only include references to the **client's** host filesystem (which is not the same buildkitd's). There are bunch of possible approaches, but there's one that sticks out to me as being not that hacky and also *relatively* easy: hook into the runc executor. Right now, our buildkitd uses `runc` to start containers. We can use oci runtime hooks to modify the spec of those containers to include a mount of our shim binary into each container. * I know of [this existing tool](https://github.com/awslabs/oci-add-hooks) for accomplishing this pretty easily, but it's also not even that hard to roll our own if a compelling need arises. In the future if we decide to switch to the containerd backend then that same approach will probably still work because the default containerd runtime uses runc. But at that point we could also have our own runtime (containerd coincidentally also uses the terminology "shim"...) too, so there will be lots of options. Pros: 1. Fairly easy 2. Robust enough - just reuses existing well defined mechanisms for hooking into container runtimes 3. Doesn't require any upstream changes to buildkitd afaict 4. A nice re-usable mechanism in the future in case we ever need to mount anything else from the buildkitd host direct into the container Cons: 1. The shim binary will no longer be part of LLB and thus won't count towards the cache key. Changing the shim won't invalidate cache * Actually, as I write this, that might just make sense... the shim is a component of the runner, so just like changing buildkitd doesn't invalidate cache, neither should the shim? Sort of a gray area, but this isn't actually an obvious con I suppose.
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
core/container.go
	ID ContainerID `json:"id"`
}

func NewContainer(id ContainerID, platform specs.Platform) (*Container, error) {
	if id == "" {
		id, err := (&containerIDPayload{Platform: platform}).Encode()
		if err != nil {
			return nil, err
		}

		return &Container{ID: id}, nil
	}

	return &Container{ID: id}, nil
}

type ContainerID string

func (id ContainerID) decode() (*containerIDPayload, error) {
	if id == "" {
		return &containerIDPayload{}, nil
	}

	var payload containerIDPayload
	if err := decodeID(&payload, id); err != nil {
		return nil, err
	}

	return &payload, nil
}

type containerIDPayload struct {
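encodeID and decodeID are not shown in this chunk. A plausible minimal implementation, assuming IDs are base64-encoded JSON payloads (an assumption; the real helpers may use a different encoding):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Hypothetical ID helpers: serialize the payload to JSON, then base64.
func encodeID(payload any) (string, error) {
	jsonBytes, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(jsonBytes), nil
}

func decodeID(payload any, id string) error {
	jsonBytes, err := base64.StdEncoding.DecodeString(id)
	if err != nil {
		return err
	}
	return json.Unmarshal(jsonBytes, payload)
}

func main() {
	type demo struct {
		Platform string `json:"platform"`
	}
	id, _ := encodeID(demo{Platform: "linux/amd64"})
	var out demo
	_ = decodeID(&out, id)
	fmt.Println(id, out.Platform)
}
```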
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
We build our shim binary on the fly, which requires we pull an image. We currently pull from dockerhub, which can result in 429 throttling errors, which is surprising to users since they never even requested that image be pulled. Two options: 1. Pull the image from a different registry that doesn't have the same ratelimiting 2. Don't build the shim like that anymore; maybe pre-build it and include it in our engine image now that that's an option Current preference is 2. That approach would: 1. save us from managing another image. 2. make it easier for users to customize the engine image in the future or to mirror it on their own registries (i.e. an internal company one) 3. be more performant (pull everything at once) --- Option 2 is easier said than done unfortunately. There's not really a built-in way to just mount a binary from the **buildkitd's host filesystem** into arbitrary execs. Execs are supposed to be fully specified by LLB and LLB alone, and LLB can only include references to the **client's** host filesystem (which is not the same buildkitd's). There are bunch of possible approaches, but there's one that sticks out to me as being not that hacky and also *relatively* easy: hook into the runc executor. Right now, our buildkitd uses `runc` to start containers. We can use oci runtime hooks to modify the spec of those containers to include a mount of our shim binary into each container. * I know of [this existing tool](https://github.com/awslabs/oci-add-hooks) for accomplishing this pretty easily, but it's also not even that hard to roll our own if a compelling need arises. In the future if we decide to switch to the containerd backend then that same approach will probably still work because the default containerd runtime uses runc. But at that point we could also have our own runtime (containerd coincidentally also uses the terminology "shim"...) too, so there will be lots of options. Pros: 1. Fairly easy 2. Robust enough - just reuses existing well defined mechanisms for hooking into container runtimes 3. Doesn't require any upstream changes to buildkitd afaict 4. A nice re-usable mechanism in the future in case we ever need to mount anything else from the buildkitd host direct into the container Cons: 1. The shim binary will no longer be part of LLB and thus won't count towards the cache key. Changing the shim won't invalidate cache * Actually, as I write this, that might just make sense... the shim is a component of the runner, so just like changing buildkitd doesn't invalidate cache, neither should the shim? Sort of a gray area, but this isn't actually an obvious con I suppose.
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
core/container.go
	FS       *pb.Definition    `json:"fs"`
	Config   specs.ImageConfig `json:"cfg"`
	Mounts   ContainerMounts   `json:"mounts,omitempty"`
	Meta     *pb.Definition    `json:"meta,omitempty"`
	Platform specs.Platform    `json:"platform,omitempty"`
	Secrets  []ContainerSecret `json:"secret_env,omitempty"`
}

type ContainerSecret struct {
	Secret    SecretID `json:"secret"`
	EnvName   string   `json:"env,omitempty"`
	MountPath string   `json:"path,omitempty"`
}

func (payload *containerIDPayload) Encode() (ContainerID, error) {
	id, err := encodeID(payload)
	if err != nil {
		return "", err
	}
	return ContainerID(id), nil
}

func (payload *containerIDPayload) FSState() (llb.State, error) {
	if payload.FS == nil {
		return llb.Scratch(), nil
	}
	return defToState(payload.FS)
}

const metaMount = "/.dagger_meta_mount"
const metaSourcePath = "meta"

func (payload *containerIDPayload) MetaState() (*llb.State, error) {
	if payload.Meta == nil {
		return nil, nil
	}
	metaSt, err := defToState(payload.Meta)
	if err != nil {
		return nil, err
	}
	return &metaSt, nil
}

type ContainerMount struct {
	Source           *pb.Definition `json:"source,omitempty"`
	SourcePath       string         `json:"source_path,omitempty"`
	Target           string         `json:"target"`
	CacheID          string         `json:"cache_id,omitempty"`
	CacheSharingMode string         `json:"cache_sharing,omitempty"`
	Tmpfs            bool           `json:"tmpfs,omitempty"`
}

func (mnt ContainerMount) SourceState() (llb.State, error) {
	if mnt.Source == nil {
		return llb.Scratch(), nil
	}
	return defToState(mnt.Source)
}

type ContainerMounts []ContainerMount

func (mnts ContainerMounts) With(newMnt ContainerMount) ContainerMounts {
	mntsCp := make(ContainerMounts, 0, len(mnts))
	// a new mount clobbers any existing mount at the same target, as well
	// as any mounts nested beneath it
	parent := newMnt.Target + "/"

	for _, mnt := range mnts {
		if mnt.Target == newMnt.Target || strings.HasPrefix(mnt.Target, parent) {
			continue
		}
		mntsCp = append(mntsCp, mnt)
	}

	mntsCp = append(mntsCp, newMnt)

	return mntsCp
}

func (container *Container) From(ctx context.Context, gw bkgw.Client, addr string) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	platform := payload.Platform

	refName, err := reference.ParseNormalizedNamed(addr)
	if err != nil {
		return nil, err
	}

	ref := reference.TagNameOnly(refName).String()

	_, cfgBytes, err := gw.ResolveImageConfig(ctx, ref, llb.ResolveImageConfigOpt{
		Platform:    &platform,
		ResolveMode: llb.ResolveModeDefault.String(),
	})
	if err != nil {
		return nil, err
	}

	var imgSpec specs.Image
	if err := json.Unmarshal(cfgBytes, &imgSpec); err != nil {
		return nil, err
	}

	dir, err := NewDirectory(ctx, llb.Image(addr), "/", platform)
	if err != nil {
		return nil, err
	}

	ctr, err := container.WithRootFS(ctx, dir)
	if err != nil {
		return nil, err
	}

	return ctr.UpdateImageConfig(ctx, func(specs.ImageConfig) specs.ImageConfig {
		return imgSpec.Config
	})
}

const defaultDockerfileName = "Dockerfile"

func (container *Container) Build(ctx context.Context, gw bkgw.Client, context *Directory, dockerfile string) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	ctxPayload, err := context.ID.Decode()
	if err != nil {
		return nil, err
	}

	platform := payload.Platform

	opts := map[string]string{
		"platform":      platforms.Format(platform),
		"contextsubdir": ctxPayload.Dir,
	}
	if dockerfile != "" {
		opts["filename"] = path.Join(ctxPayload.Dir, dockerfile)
	} else {
		opts["filename"] = path.Join(ctxPayload.Dir, defaultDockerfileName)
	}

	inputs := map[string]*pb.Definition{
		dockerfilebuilder.DefaultLocalNameContext:    ctxPayload.LLB,
		dockerfilebuilder.DefaultLocalNameDockerfile: ctxPayload.LLB,
	}

	res, err := gw.Solve(ctx, bkgw.SolveRequest{
		Frontend:       "dockerfile.v0",
		FrontendOpt:    opts,
		FrontendInputs: inputs,
	})
	if err != nil {
		return nil, err
	}

	bkref, err := res.SingleRef()
	if err != nil {
		return nil, err
	}

	var st llb.State
	if bkref == nil {
		st = llb.Scratch()
	} else {
		st, err = bkref.ToState()
		if err != nil {
			return nil, err
		}
	}
	def, err := st.Marshal(ctx, llb.Platform(platform))
	if err != nil {
		return nil, err
	}

	payload.FS = def.ToPB()

	cfgBytes, found := res.Metadata[exptypes.ExporterImageConfigKey]
	if found {
		var imgSpec specs.Image
		if err := json.Unmarshal(cfgBytes, &imgSpec); err != nil {
			return nil, err
		}
		payload.Config = imgSpec.Config
	}

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}
	return &Container{ID: id}, nil
}

func (container *Container) RootFS(ctx context.Context) (*Directory, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	return (&directoryIDPayload{
		LLB:      payload.FS,
		Platform: payload.Platform,
	}).ToDirectory()
}

func (container *Container) WithRootFS(ctx context.Context, dir *Directory) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	dirPayload, err := dir.ID.Decode()
	if err != nil {
		return nil, err
	}

	payload.FS = dirPayload.LLB

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}
	return &Container{ID: id}, nil
}

func (container *Container) WithMountedDirectory(ctx context.Context, target string, source *Directory) (*Container, error) {
	payload, err := source.ID.Decode()
	if err != nil {
		return nil, err
	}

	return container.withMounted(target, payload.LLB, payload.Dir)
}

func (container *Container) WithMountedFile(ctx context.Context, target string, source *File) (*Container, error) {
	payload, err := source.ID.decode()
	if err != nil {
		return nil, err
	}

	return container.withMounted(target, payload.LLB, payload.File)
}

func (container *Container) WithMountedCache(ctx context.Context, target string, cache CacheID, source *Directory) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	cachePayload, err := cache.decode()
	if err != nil {
		return nil, err
	}

	target = absPath(payload.Config.WorkingDir, target)

	mount := ContainerMount{
		Target:           target,
		CacheID:          cachePayload.Sum(),
		CacheSharingMode: "shared",
	}

	if source != nil {
		srcPayload, err := source.ID.Decode()
		if err != nil {
			return nil, err
		}

		mount.Source = srcPayload.LLB
		mount.SourcePath = srcPayload.Dir
	}

	payload.Mounts = payload.Mounts.With(mount)

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}
	return &Container{ID: id}, nil
}

func (container *Container) WithMountedTemp(ctx context.Context, target string) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	target = absPath(payload.Config.WorkingDir, target)

	payload.Mounts = payload.Mounts.With(ContainerMount{
		Target: target,
		Tmpfs:  true,
	})

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}
	return &Container{ID: id}, nil
}

func (container *Container) WithMountedSecret(ctx context.Context, target string, source *Secret) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	target = absPath(payload.Config.WorkingDir, target)

	payload.Secrets = append(payload.Secrets, ContainerSecret{
		Secret:    source.ID,
		MountPath: target,
	})

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}

	return &Container{ID: id}, nil
}

func (container *Container) WithoutMount(ctx context.Context, target string) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	target = absPath(payload.Config.WorkingDir, target)

	var found bool
	var foundIdx int
	for i := len(payload.Mounts) - 1; i >= 0; i-- {
		if payload.Mounts[i].Target == target {
			found = true
			foundIdx = i
			break
		}
	}

	if found {
		payload.Mounts = append(payload.Mounts[:foundIdx], payload.Mounts[foundIdx+1:]...)
	}

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}

	return &Container{ID: id}, nil
}

func (container *Container) Mounts(ctx context.Context) ([]string, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}
	mounts := []string{}
	for _, mnt := range payload.Mounts {
		mounts = append(mounts, mnt.Target)
	}
	return mounts, nil
}

func (container *Container) WithSecretVariable(ctx context.Context, name string, secret *Secret) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	payload.Secrets = append(payload.Secrets, ContainerSecret{
		Secret:  secret.ID,
		EnvName: name,
	})

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}

	return &Container{ID: id}, nil
}

func (container *Container) Directory(ctx context.Context, gw bkgw.Client, dirPath string) (*Directory, error) {
	dir, err := locatePath(ctx, container, dirPath, gw, NewDirectory)
	if err != nil {
		return nil, err
	}

	info, err := dir.Stat(ctx, gw, ".")
	if err != nil {
		return nil, err
	}

	if !info.IsDir() {
		return nil, fmt.Errorf("path %s is a file, not a directory", dirPath)
	}

	return dir, nil
}

func (container *Container) File(ctx context.Context, gw bkgw.Client, filePath string) (*File, error) {
	file, err := locatePath(ctx, container, filePath, gw, NewFile)
	if err != nil {
		return nil, err
	}

	info, err := file.Stat(ctx, gw)
	if err != nil {
		return nil, err
	}

	if info.IsDir() {
		return nil, fmt.Errorf("path %s is a directory, not a file", filePath)
	}

	return file, nil
}

func locatePath[T *File | *Directory](
	ctx context.Context,
	container *Container,
	containerPath string,
	gw bkgw.Client,
	init func(context.Context, llb.State, string, specs.Platform) (T, error),
) (T, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	containerPath = absPath(payload.Config.WorkingDir, containerPath)

	var found T

	// walk the mounts from most recently added to least, so the most
	// recently added mount covering the path wins
	for i := len(payload.Mounts) - 1; i >= 0; i-- {
		mnt := payload.Mounts[i]

		if containerPath == mnt.Target || strings.HasPrefix(containerPath, mnt.Target+"/") {
			if mnt.Tmpfs {
				return nil, fmt.Errorf("%s: cannot retrieve path from tmpfs", containerPath)
			}

			if mnt.CacheID != "" {
				return nil, fmt.Errorf("%s: cannot retrieve path from cache", containerPath)
			}

			st, err := mnt.SourceState()
			if err != nil {
				return nil, err
			}

			sub := mnt.SourcePath
			if containerPath != mnt.Target {
				dirSub := strings.TrimPrefix(containerPath, mnt.Target+"/")
				if dirSub != "" {
					sub = path.Join(sub, dirSub)
				}
			}

			found, err = init(ctx, st, sub, payload.Platform)
			if err != nil {
				return nil, err
			}

			break
		}
	}

	if found == nil {
		st, err := payload.FSState()
		if err != nil {
			return nil, err
		}

		found, err = init(ctx, st, containerPath, payload.Platform)
		if err != nil {
			return nil, err
		}
	}

	return found, nil
}

func (container *Container) withMounted(target string, srcDef *pb.Definition, srcPath string) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	target = absPath(payload.Config.WorkingDir, target)

	payload.Mounts = payload.Mounts.With(ContainerMount{
		Source:     srcDef,
		SourcePath: srcPath,
		Target:     target,
	})

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}

	return &Container{ID: id}, nil
}

func (container *Container) ImageConfig(ctx context.Context) (specs.ImageConfig, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return specs.ImageConfig{}, err
	}

	return payload.Config, nil
}

func (container *Container) UpdateImageConfig(ctx context.Context, updateFn func(specs.ImageConfig) specs.ImageConfig) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	payload.Config = updateFn(payload.Config)

	id, err := payload.Encode()
	if err != nil {
		return nil, err
	}

	return &Container{ID: id}, nil
}

func (container *Container) Exec(ctx context.Context, gw bkgw.Client, defaultPlatform specs.Platform, opts ContainerExecOpts) (*Container, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, fmt.Errorf("decode id: %w", err)
	}

	cfg := payload.Config
	mounts := payload.Mounts
	platform := payload.Platform
	if platform.OS == "" {
		// fall back to the engine's default platform when the container
		// doesn't carry one (e.g. when it was built up from scratch)
		platform = defaultPlatform
	}

	// the shim is built on the fly for the target platform; this build is
	// the image pull that the issue above is about
	shimSt, err := shim.Build(ctx, gw, platform)
	if err != nil {
		return nil, fmt.Errorf("build shim: %w", err)
	}

	args := opts.Args

	if len(args) == 0 {
		// fall back to the image's default command
		args = cfg.Cmd
	}

	if len(cfg.Entrypoint) > 0 {
		args = append(cfg.Entrypoint, args...)
	}

	runOpts := []llb.RunOption{
		// the command is run via the shim, which is mounted into the exec
		// as its own LLB state
		llb.AddMount(shim.Path, shimSt, llb.SourcePath(shim.Path)),
		llb.Args(append([]string{shim.Path}, args...)),
		llb.WithCustomName(strings.Join(args, " ")),
	}

	if opts.ExperimentalPrivilegedNesting {
		runOpts = append(runOpts,
			llb.AddEnv("DAGGER_HOST", "unix:/dagger.sock"),
			llb.AddSSHSocket(
				llb.SSHID(DaggerSockName),
				llb.SSHSocketTarget(DaggerSockPath),
			),
		)
	}

	meta := llb.Mkdir(metaSourcePath, 0o777)
	if opts.Stdin != "" {
		meta = meta.Mkfile(path.Join(metaSourcePath, "stdin"), 0o600, []byte(opts.Stdin))
	}

	runOpts = append(runOpts,
		llb.AddMount(metaMount, llb.Scratch().File(meta), llb.SourcePath(metaSourcePath)))

	if opts.RedirectStdout != "" {
		runOpts = append(runOpts, llb.AddEnv("_DAGGER_REDIRECT_STDOUT", opts.RedirectStdout))
	}

	if opts.RedirectStderr != "" {
		runOpts = append(runOpts, llb.AddEnv("_DAGGER_REDIRECT_STDERR", opts.RedirectStderr))
	}

	if cfg.User != "" {
		runOpts = append(runOpts, llb.User(cfg.User))
	}

	if cfg.WorkingDir != "" {
		runOpts = append(runOpts, llb.Dir(cfg.WorkingDir))
	}

	for _, env := range cfg.Env {
		name, val, ok := strings.Cut(env, "=")
		if !ok {
			// An env entry without "=" is not an error; it is passed
			// through with an empty value.
			_ = ok
		}

		runOpts = append(runOpts, llb.AddEnv(name, val))
	}

	for i, secret := range payload.Secrets {
		secretOpts := []llb.SecretOption{llb.SecretID(string(secret.Secret))}

		var secretDest string
		switch {
		case secret.EnvName != "":
			secretDest = secret.EnvName
			secretOpts = append(secretOpts, llb.SecretAsEnv(true))
		case secret.MountPath != "":
			secretDest = secret.MountPath
		default:
			return nil, fmt.Errorf("malformed secret config at index %d", i)
		}

		runOpts = append(runOpts, llb.AddSecret(secretDest, secretOpts...))
	}

	fsSt, err := payload.FSState()
	if err != nil {
		return nil, fmt.Errorf("fs state: %w", err)
	}

	for _, mnt := range mounts {
		srcSt, err := mnt.SourceState()
		if err != nil {
			return nil, fmt.Errorf("mount %s: %w", mnt.Target, err)
		}

		mountOpts := []llb.MountOption{}
		if mnt.SourcePath != "" {
			mountOpts = append(mountOpts, llb.SourcePath(mnt.SourcePath))
		}

		if mnt.CacheSharingMode != "" {
			var sharingMode llb.CacheMountSharingMode
			switch mnt.CacheSharingMode {
			case "shared":
				sharingMode = llb.CacheMountShared
			case "private":
				sharingMode = llb.CacheMountPrivate
			case "locked":
				sharingMode = llb.CacheMountLocked
			default:
				return nil, errors.Errorf("invalid cache mount sharing mode %q", mnt.CacheSharingMode)
			}

			mountOpts = append(mountOpts, llb.AsPersistentCacheDir(mnt.CacheID, sharingMode))
		}

		if mnt.Tmpfs {
			mountOpts = append(mountOpts, llb.Tmpfs())
		}

		runOpts = append(runOpts, llb.AddMount(mnt.Target, srcSt, mountOpts...))
	}

	execSt := fsSt.Run(runOpts...)

	execDef, err := execSt.Root().Marshal(ctx, llb.Platform(platform))
	if err != nil {
		return nil, fmt.Errorf("marshal root: %w", err)
	}

	payload.FS = execDef.ToPB()

	metaDef, err := execSt.GetMount(metaMount).Marshal(ctx, llb.Platform(platform))
	if err != nil {
return nil, fmt.Errorf("get meta mount: %w", err) } payload.Meta = metaDef.ToPB() for i, mnt := range mounts { if mnt.Tmpfs || mnt.CacheID != "" { continue } mountSt := execSt.GetMount(mnt.Target) execMountDef, err := mountSt.Marshal(ctx, llb.Platform(platform)) if err != nil { return nil, fmt.Errorf("propagate %s: %w", mnt.Target, err) } mounts[i].Source = execMountDef.ToPB() } payload.Mounts = mounts id, err := payload.Encode() if err != nil { return nil, fmt.Errorf("encode: %w", err) } return &Container{ID: id}, nil } func (container *Container) ExitCode(ctx context.Context, gw bkgw.Client) (*int, error) { file, err := container.MetaFile(ctx, gw, "exitCode") if err != nil { return nil, err } if file == nil { return nil, nil }
	content, err := file.Contents(ctx, gw)
	if err != nil {
		return nil, err
	}

	exitCode, err := strconv.Atoi(string(content))
	if err != nil {
		return nil, err
	}

	return &exitCode, nil
}

func (container *Container) MetaFile(ctx context.Context, gw bkgw.Client, filePath string) (*File, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return nil, err
	}

	meta, err := payload.MetaState()
	if err != nil {
		return nil, err
	}

	if meta == nil {
		return nil, nil
	}

	return NewFile(ctx, *meta, path.Join(metaSourcePath, filePath), payload.Platform)
}

func (container *Container) Publish(
	ctx context.Context,
	ref string,
	platformVariants []ContainerID,
	bkClient *bkclient.Client,
	solveOpts bkclient.SolveOpt,
	solveCh chan<- *bkclient.SolveStatus,
) (string, error) {
	solveOpts.Exports = []bkclient.ExportEntry{
		{
			Type: bkclient.ExporterImage,
			Attrs: map[string]string{
				"name": ref,
				"push": "true",
			},
		},
	}

	ch, wg := mirrorCh(solveCh)
	defer wg.Wait()

	res, err := bkClient.Build(ctx, solveOpts, "", func(ctx context.Context, gw bkgw.Client) (*bkgw.Result, error) {
		return container.export(ctx, gw, platformVariants)
	}, ch)
	if err != nil {
		return "", err
	}

	refName, err := reference.ParseNormalizedNamed(ref)
	if err != nil {
		return "", err
	}

	imageDigest, found := res.ExporterResponse[exptypes.ExporterImageDigestKey]
	if found {
		dig, err := digest.Parse(imageDigest)
		if err != nil {
			return "", fmt.Errorf("parse digest: %w", err)
		}
		withDig, err := reference.WithDigest(refName, dig)
		if err != nil {
			return "", fmt.Errorf("with digest: %w", err)
		}

		return withDig.String(), nil
	}

	return ref, nil
}

func (container *Container) Platform() (specs.Platform, error) {
	payload, err := container.ID.decode()
	if err != nil {
		return specs.Platform{}, err
	}

	return payload.Platform, nil
}

func (container *Container) Export(
	ctx context.Context,
	host *Host,
	dest string,
	platformVariants []ContainerID,
	bkClient *bkclient.Client,
	solveOpts bkclient.SolveOpt,
	solveCh chan<- *bkclient.SolveStatus,
) error {
	dest, err := host.NormalizeDest(dest)
	if err != nil {
		return err
	}

	out, err := os.Create(dest)
	if err != nil {
		return err
	}

	defer out.Close()

	return host.Export(ctx, bkclient.ExportEntry{
		Type: bkclient.ExporterOCI,
		Output: func(map[string]string) (io.WriteCloser, error) {
			return out, nil
		},
	}, dest, bkClient, solveOpts, solveCh, func(ctx context.Context, gw bkgw.Client) (*bkgw.Result, error) {
		return container.export(ctx, gw, platformVariants)
	})
}

func (container *Container) export(
	ctx context.Context,
	gw bkgw.Client,
	platformVariants []ContainerID,
) (*bkgw.Result, error) {
	var payloads []*containerIDPayload

	if container.ID != "" {
		payload, err := container.ID.decode()
		if err != nil {
			return nil, err
		}

		if payload.FS != nil {
			payloads = append(payloads, payload)
		}
	}

	for _, id := range platformVariants {
		payload, err := id.decode()
		if err != nil {
			return nil, err
		}

		if payload.FS != nil {
			payloads = append(payloads, payload)
		}
	}

	if len(payloads) == 0 {
		return nil, errors.New("no containers to export")
	}

	if len(payloads) == 1 {
		payload := payloads[0]

		st, err := payload.FSState()
		if err != nil {
			return nil, err
		}

		stDef, err := st.Marshal(ctx, llb.Platform(payload.Platform))
		if err != nil {
			return nil, err
		}

		res, err := gw.Solve(ctx, bkgw.SolveRequest{
			Evaluate:   true,
			Definition: stDef.ToPB(),
		})
		if err != nil {
			return nil, err
		}

		cfgBytes, err := json.Marshal(specs.Image{
			Architecture: payload.Platform.Architecture,
			OS:           payload.Platform.OS,
			OSVersion:    payload.Platform.OSVersion,
			OSFeatures:   payload.Platform.OSFeatures,
			Config:       payload.Config,
		})
		if err != nil {
			return nil, err
		}

		res.AddMeta(exptypes.ExporterImageConfigKey, cfgBytes)

		return res, nil
	}

	res := bkgw.NewResult()

	expPlatforms := &exptypes.Platforms{
		Platforms: make([]exptypes.Platform, len(payloads)),
	}

	for i, payload := range payloads {
		st, err := payload.FSState()
		if err != nil {
			return nil, err
		}

		stDef, err := st.Marshal(ctx, llb.Platform(payload.Platform))
		if err != nil {
			return nil, err
		}

		r, err := gw.Solve(ctx, bkgw.SolveRequest{
			Evaluate:   true,
			Definition: stDef.ToPB(),
		})
		if err != nil {
			return nil, err
		}
		ref, err := r.SingleRef()
		if err != nil {
			return nil, err
		}

		platformKey := platforms.Format(payload.Platform)
		res.AddRef(platformKey, ref)
		expPlatforms.Platforms[i] = exptypes.Platform{
			ID:       platformKey,
			Platform: payload.Platform,
		}

		cfgBytes, err := json.Marshal(specs.Image{
			Architecture: payload.Platform.Architecture,
			OS:           payload.Platform.OS,
			OSVersion:    payload.Platform.OSVersion,
			OSFeatures:   payload.Platform.OSFeatures,
			Config:       payload.Config,
		})
		if err != nil {
			return nil, err
		}

		res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, platformKey), cfgBytes)
	}

	platformBytes, err := json.Marshal(expPlatforms)
	if err != nil {
		return nil, err
	}

	res.AddMeta(exptypes.ExporterPlatformsKey, platformBytes)

	return res, nil
}

type ContainerExecOpts struct {
	Args                          []string
	Stdin                         string
	RedirectStdout                string
	RedirectStderr                string
	ExperimentalPrivilegedNesting bool
}
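As a standalone reference for the multi-platform branch of `export` above, here is a hedged sketch of the exporter metadata buildkit expects; the platforms listed are arbitrary examples.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ps := []specs.Platform{
		{OS: "linux", Architecture: "amd64"},
		{OS: "linux", Architecture: "arm64"},
	}

	exp := exptypes.Platforms{Platforms: make([]exptypes.Platform, len(ps))}
	for i, p := range ps {
		key := platforms.Format(p) // e.g. "linux/amd64", used as the ref key
		exp.Platforms[i] = exptypes.Platform{ID: key, Platform: p}
	}

	dt, err := json.Marshal(exp)
	if err != nil {
		panic(err)
	}

	// This JSON is what gets attached under exptypes.ExporterPlatformsKey.
	fmt.Println(string(dt))
}
```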
core/shim/cmd/main.go
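Before the `core/shim/cmd/main.go` chunks, here is a minimal hedged sketch of the shim's core wrap-and-record pattern: run a child process, tee its output to a file, and persist its exit code. The `/tmp` paths and the command are illustrative.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"os/exec"
)

// run mirrors the shim's structure below: tee the child's output to a
// file, capture its exit code, and record it for later inspection.
func run() int {
	logFile, err := os.Create("/tmp/out.log")
	if err != nil {
		panic(err)
	}
	defer logFile.Close()

	cmd := exec.Command("sh", "-c", "echo hi; exit 3")
	cmd.Stdout = io.MultiWriter(logFile, os.Stdout) // tee stdout
	cmd.Stderr = os.Stderr

	exitCode := 0
	if err := cmd.Run(); err != nil {
		exitCode = 1
		if exitErr, ok := err.(*exec.ExitError); ok {
			exitCode = exitErr.ExitCode() // 3 for this command
		}
	}

	// Record the code where a supervisor can read it back.
	if err := os.WriteFile("/tmp/exitCode", []byte(fmt.Sprintf("%d", exitCode)), 0o600); err != nil {
		panic(err)
	}

	return exitCode
}

func main() {
	os.Exit(run())
}
```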
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
	"os"
	"os/exec"
	"strings"
	"time"
)

const (
	stdinPath    = "/.dagger_meta_mount/stdin"
	exitCodePath = "/.dagger_meta_mount/exitCode"
)

var (
	stdoutPath = "/.dagger_meta_mount/stdout"
	stderrPath = "/.dagger_meta_mount/stderr"
)

func run() int {
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "usage: %s <path> [<args>]\n", os.Args[0])
		return 1
	}

	if daggerHost := os.Getenv("DAGGER_HOST"); strings.HasPrefix(daggerHost, "unix://") {
		proxyAddr, err := proxyAPI(daggerHost)
		if err != nil {
			fmt.Fprintf(os.Stderr, "err: %v\n", err)
			return 1
		}

		os.Setenv("DAGGER_HOST", proxyAddr)
	}

	name := os.Args[1]
	args := []string{}
	if len(os.Args) > 2 {
		args = os.Args[2:]
	}

	cmd := exec.Command(name, args...)
	cmd.Env = os.Environ()

	if stdinFile, err := os.Open(stdinPath); err == nil {
		defer stdinFile.Close()
		cmd.Stdin = stdinFile
	} else {
		cmd.Stdin = nil
	}

	stdoutRedirect, found := internalEnv("_DAGGER_REDIRECT_STDOUT")
	if found {
		stdoutPath = stdoutRedirect
	}

	stdoutFile, err := os.Create(stdoutPath)
	if err != nil {
		panic(err)
	}
	defer stdoutFile.Close()
	cmd.Stdout = io.MultiWriter(stdoutFile, os.Stdout)

	stderrRedirect, found := internalEnv("_DAGGER_REDIRECT_STDERR")
	if found {
		stderrPath = stderrRedirect
	}

	stderrFile, err := os.Create(stderrPath)
	if err != nil {
		panic(err)
	}
	defer stderrFile.Close()
	cmd.Stderr = io.MultiWriter(stderrFile, os.Stderr)

	exitCode := 0
	if err := cmd.Run(); err != nil {
		exitCode = 1
		if exiterr, ok := err.(*exec.ExitError); ok {
			exitCode = exiterr.ExitCode()
		}
	}

	if err := os.WriteFile(exitCodePath, []byte(fmt.Sprintf("%d", exitCode)), 0600); err != nil {
		panic(err)
	}

	return exitCode
}

func proxyAPI(daggerHost string) (string, error) {
	u, err := url.Parse(daggerHost)
	if err != nil {
		return "", err
	}

	proxy := httputil.NewSingleHostReverseProxy(&url.URL{
		Scheme: "http",
		Host:   "localhost",
	})
	proxy.Transport = &http.Transport{
		DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", u.Path)
		},
	}

	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return "", err
	}

	port := l.Addr().(*net.TCPAddr).Port

	srv := &http.Server{
		Handler:           proxy,
		ReadHeaderTimeout: 10 * time.Second,
	}

	go srv.Serve(l)

	return fmt.Sprintf("http://localhost:%d", port), nil
}

func internalEnv(name string) (string, bool) {
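The runc-hook approach described in the issue body above could, under its stated assumptions, look roughly like the sketch below: a small wrapper that rewrites a container's OCI `config.json` to bind-mount the shim from the buildkitd host before runc starts the container. The host source path and binary name are made up; this is a sketch of the technique, not Dagger's actual implementation:

```go
// Hypothetical OCI spec-rewriting wrapper, for illustration only.
package main

import (
	"encoding/json"
	"os"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	raw, err := os.ReadFile("config.json")
	if err != nil {
		panic(err)
	}

	var spec specs.Spec
	if err := json.Unmarshal(raw, &spec); err != nil {
		panic(err)
	}

	// Bind-mount the shim from the buildkitd host into the container's
	// rootfs; the source path here is a made-up example.
	spec.Mounts = append(spec.Mounts, specs.Mount{
		Destination: "/_shim",
		Type:        "bind",
		Source:      "/usr/local/bin/dagger-shim",
		Options:     []string{"rbind", "ro"},
	})

	out, err := json.MarshalIndent(&spec, "", "  ")
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("config.json", out, 0o644); err != nil {
		panic(err)
	}
}
```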
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
core/shim/cmd/main.go
	val, found := os.LookupEnv(name)
	if !found {
		return "", false
	}

	// Unset the variable so internal-only settings don't leak into the
	// environment of the user's process.
	os.Unsetenv(name)

	return val, true
}

func main() {
	os.Exit(run())
}
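`internalEnv` lets the caller smuggle shim-only settings through the environment without exposing them to the user's command. A hypothetical invocation might look like the sketch below; the redirect variable's value and the wrapped command are assumptions for illustration (the chunk above only shows `_DAGGER_REDIRECT_STDERR` explicitly):

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	// The shim reads and unsets the _DAGGER_* variables before running
	// the user's command, so the user process never sees them.
	cmd := exec.Command("/_shim", "echo", "hello")
	cmd.Env = append(os.Environ(), "_DAGGER_REDIRECT_STDERR=/err.log")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```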
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
core/shim/shim.go
package shim

import (
	"context"
	"embed"
	"io/fs"
	"path"
	"sync"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/client/llb"
	dockerfilebuilder "github.com/moby/buildkit/frontend/dockerfile/builder"
	bkgw "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/solver/pb"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// cmd embeds the shim's sources so they can be copied into an LLB state;
// the directive is required for the fs.ReadDir call in init below.
//
//go:embed cmd
var cmd embed.FS

var (
	state llb.State
	lock  sync.Mutex
)

const Path = "/_shim"

func init() {
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
core/shim/shim.go
	entries, err := fs.ReadDir(cmd, "cmd")
	if err != nil {
		panic(err)
	}

	// Copy each embedded source file into a scratch LLB state, preserving
	// its file mode.
	state = llb.Scratch()
	for _, e := range entries {
		contents, err := fs.ReadFile(cmd, path.Join("cmd", e.Name()))
		if err != nil {
			panic(err)
		}

		state = state.File(llb.Mkfile(e.Name(), e.Type().Perm(), contents))
	}
}

func Build(ctx context.Context, gw bkgw.Client, p specs.Platform) (llb.State, error) {
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
core/shim/shim.go
	lock.Lock()
	def, err := state.Marshal(ctx, llb.Platform(p))
	lock.Unlock()
	if err != nil {
		return llb.State{}, err
	}

	opts := map[string]string{
		"platform": platforms.Format(p),
	}

	inputs := map[string]*pb.Definition{
		dockerfilebuilder.DefaultLocalNameContext:    def.ToPB(),
		dockerfilebuilder.DefaultLocalNameDockerfile: def.ToPB(),
	}

	res, err := gw.Solve(ctx, bkgw.SolveRequest{
		Frontend:       "dockerfile.v0",
		FrontendOpt:    opts,
		FrontendInputs: inputs,
	})
	if err != nil {
		return llb.State{}, err
	}

	bkref, err := res.SingleRef()
	if err != nil {
		return llb.State{}, err
	}

	return bkref.ToState()
}
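A call site for `Build` might look like the following sketch: build the shim for the exec's platform, then copy it into the exec's rootfs at `shim.Path`. `withShim` is a hypothetical helper written to illustrate the flow; Dagger's real wiring may differ:

```go
package core

import (
	"context"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/client/llb"
	bkgw "github.com/moby/buildkit/frontend/gateway/client"

	"github.com/dagger/dagger/core/shim"
)

// withShim copies a freshly built shim binary into st's rootfs so an
// exec can be re-routed through it. Hypothetical helper for illustration.
func withShim(ctx context.Context, gw bkgw.Client, st llb.State) (llb.State, error) {
	shimSt, err := shim.Build(ctx, gw, platforms.DefaultSpec())
	if err != nil {
		return llb.State{}, err
	}
	return st.File(llb.Copy(shimSt, shim.Path, shim.Path)), nil
}
```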
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
internal/mage/util/util.go
package util

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"runtime"
	"strings"
	"sync"

	"dagger.io/dagger"
)

func Repository(c *dagger.Client) *dagger.Directory {
	return c.Host().Workdir(dagger.HostWorkdirOpts{
		Exclude: []string{
			"**/node_modules",
			"**/__pycache__",
			"**/.venv",
			"**/.mypy_cache",
			"**/.pytest_cache",
		},
	})
}

func RepositoryGoCodeOnly(c *dagger.Client) *dagger.Directory {
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
internal/mage/util/util.go
	return c.Directory().WithDirectory("/", Repository(c), dagger.DirectoryWithDirectoryOpts{
		Include: []string{
			"**/*.go",
			"**/go.mod",
			"**/go.sum",
			"**/*.go.tmpl",
			"**/*.ts.tmpl",
			"**/*.graphqls",
			"**/*.graphql",
			".golangci.yml",
			"**/Dockerfile",
		},
	})
}

func GoBase(c *dagger.Client) *dagger.Container {
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
internal/mage/util/util.go
	repo := RepositoryGoCodeOnly(c)

	goMods := c.Directory()
	for _, f := range []string{"go.mod", "go.sum", "sdk/go/go.mod", "sdk/go/go.sum"} {
		goMods = goMods.WithFile(f, repo.File(f))
	}

	return c.Container().
		From("golang:1.19-alpine").
		WithEnvVariable("CGO_ENABLED", "0").
		WithWorkdir("/app").
		WithMountedDirectory("/app", goMods).
		Exec(dagger.ContainerExecOpts{
			Args: []string{"go", "mod", "download"},
		}).
		WithMountedDirectory("/app", repo)
}

func DaggerBinary(c *dagger.Client) *dagger.File {
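`GoBase` mounts only the `go.mod`/`go.sum` files before running `go mod download`, so the slow module fetch is cached independently of source edits and only re-runs when the module graph changes. An alternative, assuming the SDK's cache-volume API of that era, would be to persist the module cache itself across runs; `goBaseWithCache` and the cache key are hypothetical:

```go
// Hypothetical variant of GoBase using a persistent module cache instead
// of a cached go-mod-download layer.
func goBaseWithCache(c *dagger.Client) *dagger.Container {
	return c.Container().
		From("golang:1.19-alpine").
		WithEnvVariable("CGO_ENABLED", "0").
		WithMountedCache("/go/pkg/mod", c.CacheVolume("go-mod")).
		WithWorkdir("/app").
		WithMountedDirectory("/app", RepositoryGoCodeOnly(c))
}
```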
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
internal/mage/util/util.go
	return GoBase(c).
		Exec(dagger.ContainerExecOpts{
			Args: []string{"go", "build", "-o", "./bin/cloak", "-ldflags", "-s -w", "./cmd/cloak"},
		}).
		File("./bin/cloak")
}

const (
	engineSessionBin = "dagger-engine-session"
	buildkitRepo     = "github.com/moby/buildkit"
	buildkitBranch   = "v0.10.5"
)

func DevEngineContainer(c *dagger.Client, arches, oses []string) []*dagger.Container {
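`DaggerBinary` returns the built CLI as a `*dagger.File`; a consumer might export it back to the host like the sketch below. `exportCLI` and the output path are assumptions for illustration, written as if they lived alongside util's other helpers:

```go
// Hypothetical usage of DaggerBinary: write the CLI to the host filesystem.
func exportCLI(ctx context.Context, c *dagger.Client) error {
	ok, err := DaggerBinary(c).Export(ctx, "./bin/cloak")
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("export reported failure")
	}
	return nil
}
```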
closed
dagger/dagger
https://github.com/dagger/dagger
3,733
Building shim can cause rate limit errors from dockerhub
https://github.com/dagger/dagger/issues/3733
https://github.com/dagger/dagger/pull/3913
9443adaa8f5bafe062eb757ac596c198391c9b61
32c1d82fa2c715a32e7d7d0c95d6a50e96c09fae
2022-11-08T19:17:23Z
go
2022-11-18T03:37:21Z
internal/mage/util/util.go
	buildkitRepo := c.Git(buildkitRepo).Branch(buildkitBranch).Tree()

	platformVariants := make([]*dagger.Container, 0, len(arches))
	for _, arch := range arches {
		buildkitBase := c.Container(dagger.ContainerOpts{
			Platform: dagger.Platform("linux/" + arch),
		}).Build(buildkitRepo)

		for _, os := range oses {
			for _, arch := range arches {
				builtBin := GoBase(c).
					WithEnvVariable("GOOS", os).
					WithEnvVariable("GOARCH", arch).
					Exec(dagger.ContainerExecOpts{
						Args: []string{"go", "build", "-o", "./bin/" + engineSessionBin, "-ldflags", "-s -w", "/app/cmd/engine-session"},
					}).
					File("./bin/" + engineSessionBin)

				buildkitBase = buildkitBase.WithFS(