2014-10-23 04:45:23 +08:00
|
|
|
// +build linux
|
|
|
|
|
|
|
|
package libcontainer
|
|
|
|
|
2014-10-31 06:08:28 +08:00
|
|
|
import (
|
2015-10-17 23:35:36 +08:00
|
|
|
"bytes"
|
2015-02-12 08:45:23 +08:00
|
|
|
"encoding/json"
|
2018-01-23 01:03:02 +08:00
|
|
|
"errors"
|
2014-12-15 23:05:11 +08:00
|
|
|
"fmt"
|
2015-10-17 23:35:36 +08:00
|
|
|
"io"
|
2015-04-29 00:49:44 +08:00
|
|
|
"io/ioutil"
|
2017-03-02 16:02:15 +08:00
|
|
|
"net"
|
2014-12-15 23:05:11 +08:00
|
|
|
"os"
|
|
|
|
"os/exec"
|
2015-02-12 08:45:23 +08:00
|
|
|
"path/filepath"
|
2015-08-31 19:34:14 +08:00
|
|
|
"reflect"
|
2015-03-19 11:22:21 +08:00
|
|
|
"strings"
|
2015-02-14 06:41:37 +08:00
|
|
|
"sync"
|
2017-05-10 05:38:27 +08:00
|
|
|
"syscall" // only for SysProcAttr and Signal
|
2016-01-23 09:29:36 +08:00
|
|
|
"time"
|
2014-12-15 23:05:11 +08:00
|
|
|
|
2015-06-22 10:29:59 +08:00
|
|
|
"github.com/opencontainers/runc/libcontainer/cgroups"
|
|
|
|
"github.com/opencontainers/runc/libcontainer/configs"
|
|
|
|
"github.com/opencontainers/runc/libcontainer/criurpc"
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
is in the root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which maps to the entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
"github.com/opencontainers/runc/libcontainer/intelrdt"
|
2016-07-05 08:24:13 +08:00
|
|
|
"github.com/opencontainers/runc/libcontainer/system"
|
2016-01-26 10:15:44 +08:00
|
|
|
"github.com/opencontainers/runc/libcontainer/utils"
|
2017-07-19 22:28:59 +08:00
|
|
|
|
|
|
|
"github.com/golang/protobuf/proto"
|
|
|
|
"github.com/sirupsen/logrus"
|
2015-09-14 08:40:43 +08:00
|
|
|
"github.com/syndtr/gocapability/capability"
|
2015-10-17 23:35:36 +08:00
|
|
|
"github.com/vishvananda/netlink/nl"
|
2017-07-19 22:28:59 +08:00
|
|
|
"golang.org/x/sys/unix"
|
2014-10-31 06:08:28 +08:00
|
|
|
)
|
2014-10-23 04:45:23 +08:00
|
|
|
|
2015-04-09 05:14:51 +08:00
|
|
|
// stdioFdCount is the number of standard stdio file descriptors
// (stdin, stdout, stderr).
const stdioFdCount = 3
|
|
|
|
|
2014-10-23 04:45:23 +08:00
|
|
|
type linuxContainer struct {
	id                   string           // unique container ID
	root                 string           // per-container state directory (holds e.g. the exec fifo)
	config               *configs.Config  // container configuration
	cgroupManager        cgroups.Manager  // manages the container's cgroups
	intelRdtManager      intelrdt.Manager // Intel RDT/CAT "resource control" manager; nil when not in use
	initPath             string           // binary executed as the container init — presumably runc itself; TODO confirm against newParentProcess
	initArgs             []string         // arguments passed to initPath
	initProcess          parentProcess    // handle on the container's init process
	initProcessStartTime uint64           // init's start time, recorded from state when the container starts
	criuPath             string           // path to the criu binary for checkpoint/restore
	newuidmapPath        string           // path to the newuidmap helper binary
	newgidmapPath        string           // path to the newgidmap helper binary
	m                    sync.Mutex       // guards the container's mutable state
	criuVersion          int              // detected criu version
	state                containerState   // current state-machine state (created/running/...)
	created              time.Time        // UTC timestamp of when the container was started
}
|
|
|
|
|
2015-10-24 00:22:48 +08:00
|
|
|
// State represents a running container's state.
type State struct {
	BaseState

	// Platform specific fields below here

	// Specifies if the container was started under the rootless mode.
	Rootless bool `json:"rootless"`

	// Path to all the cgroups setup for a container. Key is cgroup subsystem name
	// with the value as the path.
	CgroupPaths map[string]string `json:"cgroup_paths"`

	// NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
	// with the value as the path.
	NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`

	// Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore.
	ExternalDescriptors []string `json:"external_descriptors,omitempty"`

	// IntelRdtPath is the container's directory in the Intel RDT "resource
	// control" filesystem (e.g. /sys/fs/resctrl/<container_id>).
	IntelRdtPath string `json:"intel_rdt_path"`
}
|
|
|
|
|
2016-04-12 16:12:23 +08:00
|
|
|
// Container is a libcontainer container object.
//
// Each container is thread-safe within the same process. Since a container can
// be destroyed by a separate process, any function may return that the container
// was not found.
type Container interface {
	BaseContainer

	// Methods below here are platform specific

	// Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
	//
	// errors:
	// Systemerror - System error.
	Checkpoint(criuOpts *CriuOpts) error

	// Restore restores the checkpointed container to a running state using the criu(8) utility.
	//
	// errors:
	// Systemerror - System error.
	Restore(process *Process, criuOpts *CriuOpts) error

	// If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses
	// the execution of any user processes. Asynchronously, when the container finished being paused the
	// state is changed to PAUSED.
	// If the Container state is PAUSED, do nothing.
	//
	// errors:
	// ContainerNotExists - Container no longer exists,
	// ContainerNotRunning - Container not running or created,
	// Systemerror - System error.
	Pause() error

	// If the Container state is PAUSED, resumes the execution of any user processes in the
	// Container before setting the Container state to RUNNING.
	// If the Container state is RUNNING, do nothing.
	//
	// errors:
	// ContainerNotExists - Container no longer exists,
	// ContainerNotPaused - Container is not paused,
	// Systemerror - System error.
	Resume() error

	// NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
	//
	// errors:
	// Systemerror - System error.
	NotifyOOM() (<-chan struct{}, error)

	// NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level.
	//
	// errors:
	// Systemerror - System error.
	NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
}
|
|
|
|
|
2015-02-01 13:21:06 +08:00
|
|
|
// ID returns the container's unique ID.
func (c *linuxContainer) ID() string {
	return c.id
}
|
|
|
|
|
2015-02-01 13:21:06 +08:00
|
|
|
// Config returns a copy of the container's configuration.
func (c *linuxContainer) Config() configs.Config {
	return *c.config
}
|
|
|
|
|
2015-02-12 08:45:23 +08:00
|
|
|
// Status returns the container's current status, computed under the
// container lock.
func (c *linuxContainer) Status() (Status, error) {
	c.m.Lock()
	defer c.m.Unlock()
	return c.currentStatus()
}
|
|
|
|
|
2015-02-12 06:45:07 +08:00
|
|
|
// State returns the container's current state, computed under the
// container lock.
func (c *linuxContainer) State() (*State, error) {
	c.m.Lock()
	defer c.m.Unlock()
	return c.currentState()
}
|
|
|
|
|
2014-10-23 07:27:06 +08:00
|
|
|
func (c *linuxContainer) Processes() ([]int, error) {
|
2016-01-09 03:37:18 +08:00
|
|
|
pids, err := c.cgroupManager.GetAllPids()
|
2014-10-23 04:45:23 +08:00
|
|
|
if err != nil {
|
2016-04-19 02:37:26 +08:00
|
|
|
return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups")
|
2014-10-23 04:45:23 +08:00
|
|
|
}
|
|
|
|
return pids, nil
|
|
|
|
}
|
|
|
|
|
2015-02-01 11:56:27 +08:00
|
|
|
func (c *linuxContainer) Stats() (*Stats, error) {
|
2014-10-23 04:45:23 +08:00
|
|
|
var (
|
|
|
|
err error
|
2015-02-01 11:56:27 +08:00
|
|
|
stats = &Stats{}
|
2014-10-23 04:45:23 +08:00
|
|
|
)
|
2014-12-06 09:02:49 +08:00
|
|
|
if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil {
|
2016-04-19 02:37:26 +08:00
|
|
|
return stats, newSystemErrorWithCause(err, "getting container stats from cgroups")
|
2014-10-23 04:45:23 +08:00
|
|
|
}
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
Is in root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
if c.intelRdtManager != nil {
|
|
|
|
if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil {
|
|
|
|
return stats, newSystemErrorWithCause(err, "getting container's Intel RDT stats")
|
|
|
|
}
|
|
|
|
}
|
2015-02-07 13:12:27 +08:00
|
|
|
for _, iface := range c.config.Networks {
|
2015-02-10 07:16:27 +08:00
|
|
|
switch iface.Type {
|
|
|
|
case "veth":
|
2015-02-11 03:51:45 +08:00
|
|
|
istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
|
2015-02-10 07:16:27 +08:00
|
|
|
if err != nil {
|
2016-04-19 02:37:26 +08:00
|
|
|
return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName)
|
2015-02-07 13:12:27 +08:00
|
|
|
}
|
2015-02-10 07:16:27 +08:00
|
|
|
stats.Interfaces = append(stats.Interfaces, istats)
|
2015-02-07 13:12:27 +08:00
|
|
|
}
|
2014-10-23 04:45:23 +08:00
|
|
|
}
|
|
|
|
return stats, nil
|
|
|
|
}
|
2014-10-28 08:51:14 +08:00
|
|
|
|
2015-03-11 16:46:54 +08:00
|
|
|
// Set applies a new resource configuration to a running container. The cgroup
// configuration (and Intel RDT configuration, when present) is applied first;
// only after both succeed are c.config and the on-disk state updated, so a
// failed Set leaves the recorded config untouched. On failure the previous
// configuration is re-applied as a best-effort rollback.
//
// errors:
// ContainerNotRunning - the container is stopped.
func (c *linuxContainer) Set(config configs.Config) error {
	c.m.Lock()
	defer c.m.Unlock()
	status, err := c.currentStatus()
	if err != nil {
		return err
	}
	if status == Stopped {
		return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning)
	}
	if err := c.cgroupManager.Set(&config); err != nil {
		// Roll back to the previous cgroup configuration; a rollback failure
		// is only logged since the original error is the one to report.
		if err2 := c.cgroupManager.Set(c.config); err2 != nil {
			logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
		}
		return err
	}
	if c.intelRdtManager != nil {
		if err := c.intelRdtManager.Set(&config); err != nil {
			// Roll back the Intel RDT configuration the same way.
			if err2 := c.intelRdtManager.Set(c.config); err2 != nil {
				logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
			}
			return err
		}
	}
	// After config setting succeed, update config and states.
	c.config = &config
	_, err = c.updateState(nil)
	return err
}
|
|
|
|
|
2015-02-23 17:26:43 +08:00
|
|
|
// Start launches process inside the container. When the container is
// currently Stopped this is an init start: the exec fifo is created first so
// the new init process can block on it, and the fifo is removed again if the
// start fails. For a non-Stopped container this starts an additional process.
func (c *linuxContainer) Start(process *Process) error {
	c.m.Lock()
	defer c.m.Unlock()
	status, err := c.currentStatus()
	if err != nil {
		return err
	}
	if status == Stopped {
		if err := c.createExecFifo(); err != nil {
			return err
		}
	}
	// status == Stopped means this process becomes the container's init.
	if err := c.start(process, status == Stopped); err != nil {
		if status == Stopped {
			// Clean up the fifo we created above so a retry can recreate it.
			c.deleteExecFifo()
		}
		return err
	}
	return nil
}
|
|
|
|
|
2016-05-28 04:13:11 +08:00
|
|
|
// Run starts process in the container and, when the container was Stopped
// (i.e. this start created a new init), immediately unblocks init by draining
// the exec fifo so the container goes straight to running.
func (c *linuxContainer) Run(process *Process) error {
	c.m.Lock()
	status, err := c.currentStatus()
	if err != nil {
		c.m.Unlock()
		return err
	}
	// The lock is released here because Start acquires it itself.
	// NOTE(review): the status read above happens outside Start's critical
	// section, so it could in principle be stale by the time Start runs —
	// confirm callers serialize Run invocations externally.
	c.m.Unlock()
	if err := c.Start(process); err != nil {
		return err
	}
	if status == Stopped {
		return c.exec()
	}
	return nil
}
|
|
|
|
|
2016-06-07 04:15:18 +08:00
|
|
|
// Exec signals the container's init process to proceed, under the container
// lock. See exec for the fifo-based handshake.
func (c *linuxContainer) Exec() error {
	c.m.Lock()
	defer c.m.Unlock()
	return c.exec()
}
|
|
|
|
|
|
|
|
// exec unblocks the container's init process by draining the exec fifo.
//
// Opening the fifo read-only blocks until init opens the write end, so the
// open is raced against a poll on the init pid: if init dies before ever
// touching the fifo, exec returns an error instead of hanging forever.
// Closing fifoOpen stops the poll goroutine once the fifo has been opened.
func (c *linuxContainer) exec() error {
	path := filepath.Join(c.root, execFifoFilename)

	fifoOpen := make(chan struct{})
	select {
	case <-awaitProcessExit(c.initProcess.pid(), fifoOpen):
		return errors.New("container process is already dead")
	case result := <-awaitFifoOpen(path):
		close(fifoOpen)
		if result.err != nil {
			return result.err
		}
		f := result.file
		defer f.Close()
		if err := readFromExecFifo(f); err != nil {
			return err
		}
		// The fifo is single-use; remove it once it has been consumed.
		return os.Remove(path)
	}
}
|
|
|
|
|
|
|
|
func readFromExecFifo(execFifo io.Reader) error {
|
|
|
|
data, err := ioutil.ReadAll(execFifo)
|
2016-06-07 04:15:18 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-23 01:03:02 +08:00
|
|
|
if len(data) <= 0 {
|
|
|
|
return fmt.Errorf("cannot start an already running container")
|
2016-06-07 04:15:18 +08:00
|
|
|
}
|
2018-01-23 01:03:02 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func awaitProcessExit(pid int, exit <-chan struct{}) <-chan struct{} {
|
|
|
|
isDead := make(chan struct{})
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-exit:
|
|
|
|
return
|
|
|
|
case <-time.After(time.Millisecond * 100):
|
|
|
|
stat, err := system.Stat(pid)
|
|
|
|
if err != nil || stat.State == system.Zombie {
|
|
|
|
close(isDead)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
return isDead
|
|
|
|
}
|
|
|
|
|
|
|
|
func awaitFifoOpen(path string) <-chan openResult {
|
|
|
|
fifoOpened := make(chan openResult)
|
|
|
|
go func() {
|
|
|
|
f, err := os.OpenFile(path, os.O_RDONLY, 0)
|
|
|
|
if err != nil {
|
|
|
|
fifoOpened <- openResult{err: newSystemErrorWithCause(err, "open exec fifo for reading")}
|
2018-01-23 18:46:31 +08:00
|
|
|
return
|
2018-01-23 01:03:02 +08:00
|
|
|
}
|
|
|
|
fifoOpened <- openResult{file: f}
|
|
|
|
}()
|
|
|
|
return fifoOpened
|
|
|
|
}
|
|
|
|
|
|
|
|
// openResult is the outcome of an asynchronous attempt to open the exec
// fifo: either an open file or the error that prevented opening it.
type openResult struct {
	file *os.File
	err  error
}
|
|
|
|
|
2016-05-20 08:28:58 +08:00
|
|
|
// start creates and starts the parent process for process. When isInit is
// true this process becomes the container's init: the container transitions
// to the created state, the state file is updated, and any poststart hooks
// run (a failing hook terminates the freshly started init). Otherwise the
// container simply transitions to the running state.
func (c *linuxContainer) start(process *Process, isInit bool) error {
	parent, err := c.newParentProcess(process, isInit)
	if err != nil {
		return newSystemErrorWithCause(err, "creating new parent process")
	}
	if err := parent.start(); err != nil {
		// terminate the process to ensure that it properly is reaped.
		if err := ignoreTerminateErrors(parent.terminate()); err != nil {
			logrus.Warn(err)
		}
		return newSystemErrorWithCause(err, "starting container process")
	}
	// generate a timestamp indicating when the container was started
	c.created = time.Now().UTC()
	if isInit {
		c.state = &createdState{
			c: c,
		}
		state, err := c.updateState(parent)
		if err != nil {
			return err
		}
		// Remember init's start time so it can be compared later against
		// the live process.
		c.initProcessStartTime = state.InitProcessStartTime

		if c.config.Hooks != nil {
			bundle, annotations := utils.Annotations(c.config.Labels)
			s := configs.HookState{
				Version:     c.config.Version,
				ID:          c.id,
				Pid:         parent.pid(),
				Bundle:      bundle,
				Annotations: annotations,
			}
			for i, hook := range c.config.Hooks.Poststart {
				if err := hook.Run(s); err != nil {
					// A failed poststart hook aborts the start: kill the
					// init process we just created, then report the hook
					// failure.
					if err := ignoreTerminateErrors(parent.terminate()); err != nil {
						logrus.Warn(err)
					}
					return newSystemErrorWithCausef(err, "running poststart hook %d", i)
				}
			}
		}
	} else {
		c.state = &runningState{
			c: c,
		}
	}
	return nil
}
|
|
|
|
|
2016-11-08 07:22:27 +08:00
|
|
|
func (c *linuxContainer) Signal(s os.Signal, all bool) error {
|
|
|
|
if all {
|
|
|
|
return signalAllProcesses(c.cgroupManager, s)
|
|
|
|
}
|
2015-08-04 07:48:19 +08:00
|
|
|
if err := c.initProcess.signal(s); err != nil {
|
2016-04-19 02:37:26 +08:00
|
|
|
return newSystemErrorWithCause(err, "signaling init process")
|
2015-08-04 07:48:19 +08:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-02-23 02:34:48 +08:00
|
|
|
func (c *linuxContainer) createExecFifo() error {
|
2017-03-18 01:32:16 +08:00
|
|
|
rootuid, err := c.Config().HostRootUID()
|
2017-02-23 02:34:48 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-03-18 01:32:16 +08:00
|
|
|
rootgid, err := c.Config().HostRootGID()
|
2017-02-23 02:34:48 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
fifoName := filepath.Join(c.root, execFifoFilename)
|
|
|
|
if _, err := os.Stat(fifoName); err == nil {
|
|
|
|
return fmt.Errorf("exec fifo %s already exists", fifoName)
|
|
|
|
}
|
2017-05-10 05:38:27 +08:00
|
|
|
oldMask := unix.Umask(0000)
|
|
|
|
if err := unix.Mkfifo(fifoName, 0622); err != nil {
|
|
|
|
unix.Umask(oldMask)
|
2017-02-23 02:34:48 +08:00
|
|
|
return err
|
|
|
|
}
|
2017-05-10 05:38:27 +08:00
|
|
|
unix.Umask(oldMask)
|
2017-02-23 02:34:48 +08:00
|
|
|
if err := os.Chown(fifoName, rootuid, rootgid); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) deleteExecFifo() {
|
|
|
|
fifoName := filepath.Join(c.root, execFifoFilename)
|
|
|
|
os.Remove(fifoName)
|
|
|
|
}
|
|
|
|
|
2017-08-24 15:37:26 +08:00
|
|
|
// includeExecFifo opens the container's execfifo as a pathfd, so that the
|
|
|
|
// container cannot access the statedir (and the FIFO itself remains
|
|
|
|
// un-opened). It then adds the FifoFd to the given exec.Cmd as an inherited
|
|
|
|
// fd, with _LIBCONTAINER_FIFOFD set to its fd number.
|
|
|
|
func (c *linuxContainer) includeExecFifo(cmd *exec.Cmd) error {
|
|
|
|
fifoName := filepath.Join(c.root, execFifoFilename)
|
|
|
|
fifoFd, err := unix.Open(fifoName, unix.O_PATH|unix.O_CLOEXEC, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
cmd.ExtraFiles = append(cmd.ExtraFiles, os.NewFile(uintptr(fifoFd), fifoName))
|
|
|
|
cmd.Env = append(cmd.Env,
|
|
|
|
fmt.Sprintf("_LIBCONTAINER_FIFOFD=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-02-07 13:12:27 +08:00
|
|
|
func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) {
|
2017-03-03 04:53:06 +08:00
|
|
|
parentPipe, childPipe, err := utils.NewSockPair("init")
|
2015-02-07 13:12:27 +08:00
|
|
|
if err != nil {
|
2016-04-19 02:37:26 +08:00
|
|
|
return nil, newSystemErrorWithCause(err, "creating new init pipe")
|
2015-02-07 13:12:27 +08:00
|
|
|
}
|
2016-11-28 22:25:06 +08:00
|
|
|
cmd, err := c.commandTemplate(p, childPipe)
|
2015-02-07 13:12:27 +08:00
|
|
|
if err != nil {
|
2016-04-19 02:37:26 +08:00
|
|
|
return nil, newSystemErrorWithCause(err, "creating new command template")
|
2015-02-07 13:12:27 +08:00
|
|
|
}
|
|
|
|
if !doInit {
|
2016-11-28 22:25:06 +08:00
|
|
|
return c.newSetnsProcess(p, cmd, parentPipe, childPipe)
|
2015-02-07 13:12:27 +08:00
|
|
|
}
|
2016-11-28 22:25:06 +08:00
|
|
|
|
2017-08-24 15:37:26 +08:00
|
|
|
// We only set up fifoFd if we're not doing a `runc exec`. The historic
|
|
|
|
// reason for this is that previously we would pass a dirfd that allowed
|
|
|
|
// for container rootfs escape (and not doing it in `runc exec` avoided
|
|
|
|
// that problem), but we no longer do that. However, there's no need to do
|
|
|
|
// this for `runc exec` so we just keep it this way to be safe.
|
|
|
|
if err := c.includeExecFifo(cmd); err != nil {
|
|
|
|
return nil, newSystemErrorWithCause(err, "including execfifo in cmd.Exec setup")
|
2016-11-28 22:25:06 +08:00
|
|
|
}
|
2017-08-24 15:37:26 +08:00
|
|
|
return c.newInitProcess(p, cmd, parentPipe, childPipe)
|
2015-02-07 13:12:27 +08:00
|
|
|
}
|
|
|
|
|
2016-11-28 22:25:06 +08:00
|
|
|
// commandTemplate constructs the exec.Cmd used to fork the container child
// process (runc init). It wires the process's stdio, passes the init pipe and
// optional console socket as inherited fds, and exports their fd numbers via
// _LIBCONTAINER_* environment variables.
//
// NOTE: the order of the ExtraFiles appends below is significant — each fd
// number advertised in the environment is computed from the slice length at
// the moment of the append.
func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) {
	// Run our own binary (c.initPath) but report argv[0] as configured.
	cmd := exec.Command(c.initPath, c.initArgs[1:]...)
	cmd.Args[0] = c.initArgs[0]
	cmd.Stdin = p.Stdin
	cmd.Stdout = p.Stdout
	cmd.Stderr = p.Stderr
	cmd.Dir = c.config.Rootfs
	if cmd.SysProcAttr == nil {
		cmd.SysProcAttr = &syscall.SysProcAttr{}
	}
	cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...)
	if p.ConsoleSocket != nil {
		cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket)
		cmd.Env = append(cmd.Env,
			fmt.Sprintf("_LIBCONTAINER_CONSOLE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
		)
	}
	cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe)
	cmd.Env = append(cmd.Env,
		fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
	)
	// NOTE: when running a container with no PID namespace and the parent process spawning the container is
	// PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason
	// even with the parent still running.
	if c.config.ParentDeathSignal > 0 {
		cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal)
	}
	return cmd, nil
}
|
|
|
|
|
2017-08-24 15:37:26 +08:00
|
|
|
// newInitProcess wraps cmd into an initProcess that creates a brand-new
// container: it collects any pre-existing namespace paths from the config,
// serializes the bootstrap data (clone flags + namespace paths) for the
// child, and bundles the cgroup and Intel RDT managers with it.
func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*initProcess, error) {
	// Tell the forked child to run the standard (full) init path.
	cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard))
	// Map of namespaces that should be joined rather than created.
	nsMaps := make(map[configs.NamespaceType]string)
	for _, ns := range c.config.Namespaces {
		if ns.Path != "" {
			nsMaps[ns.Type] = ns.Path
		}
	}
	// If a PID namespace path was supplied, init shares it with other
	// processes.
	_, sharePidns := nsMaps[configs.NEWPID]
	data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps)
	if err != nil {
		return nil, err
	}
	return &initProcess{
		cmd:             cmd,
		childPipe:       childPipe,
		parentPipe:      parentPipe,
		manager:         c.cgroupManager,
		intelRdtManager: c.intelRdtManager,
		config:          c.newInitConfig(p),
		container:       c,
		process:         p,
		bootstrapData:   data,
		sharePidns:      sharePidns,
	}, nil
}
|
|
|
|
|
2016-11-28 22:25:06 +08:00
|
|
|
// newSetnsProcess wraps cmd into a setnsProcess used to join an already
// running container (e.g. `runc exec`): the child enters the container's
// existing namespaces via setns(2) instead of creating new ones.
func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) {
	// Tell the forked child to run the setns path rather than full init.
	cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
	state, err := c.currentState()
	if err != nil {
		return nil, newSystemErrorWithCause(err, "getting container's current state")
	}
	// for setns process, we don't have to set cloneflags as the process namespaces
	// will only be set via setns syscall
	data, err := c.bootstrapData(0, state.NamespacePaths)
	if err != nil {
		return nil, err
	}
	return &setnsProcess{
		cmd:           cmd,
		cgroupPaths:   c.cgroupManager.GetPaths(),
		intelRdtPath:  state.IntelRdtPath,
		childPipe:     childPipe,
		parentPipe:    parentPipe,
		config:        c.newInitConfig(p),
		process:       p,
		bootstrapData: data,
	}, nil
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
|
2016-03-04 02:44:33 +08:00
|
|
|
cfg := &initConfig{
|
2015-04-01 05:40:05 +08:00
|
|
|
Config: c.config,
|
|
|
|
Args: process.Args,
|
|
|
|
Env: process.Env,
|
|
|
|
User: process.User,
|
2016-06-10 18:35:13 +08:00
|
|
|
AdditionalGroups: process.AdditionalGroups,
|
2015-04-01 05:40:05 +08:00
|
|
|
Cwd: process.Cwd,
|
|
|
|
Capabilities: process.Capabilities,
|
|
|
|
PassedFilesCount: len(process.ExtraFiles),
|
2016-02-23 04:36:12 +08:00
|
|
|
ContainerId: c.ID(),
|
2016-03-04 02:44:33 +08:00
|
|
|
NoNewPrivileges: c.config.NoNewPrivileges,
|
2016-04-23 21:39:42 +08:00
|
|
|
Rootless: c.config.Rootless,
|
2016-03-04 02:44:33 +08:00
|
|
|
AppArmorProfile: c.config.AppArmorProfile,
|
|
|
|
ProcessLabel: c.config.ProcessLabel,
|
2016-03-11 06:35:16 +08:00
|
|
|
Rlimits: c.config.Rlimits,
|
2015-02-01 13:21:06 +08:00
|
|
|
}
|
2016-03-04 02:44:33 +08:00
|
|
|
if process.NoNewPrivileges != nil {
|
|
|
|
cfg.NoNewPrivileges = *process.NoNewPrivileges
|
|
|
|
}
|
|
|
|
if process.AppArmorProfile != "" {
|
|
|
|
cfg.AppArmorProfile = process.AppArmorProfile
|
|
|
|
}
|
|
|
|
if process.Label != "" {
|
|
|
|
cfg.ProcessLabel = process.Label
|
|
|
|
}
|
2016-03-11 06:35:16 +08:00
|
|
|
if len(process.Rlimits) > 0 {
|
|
|
|
cfg.Rlimits = process.Rlimits
|
|
|
|
}
|
2017-03-03 04:53:06 +08:00
|
|
|
cfg.CreateConsole = process.ConsoleSocket != nil
|
2017-09-26 21:39:46 +08:00
|
|
|
cfg.ConsoleWidth = process.ConsoleWidth
|
|
|
|
cfg.ConsoleHeight = process.ConsoleHeight
|
2016-03-04 02:44:33 +08:00
|
|
|
return cfg
|
2014-12-15 23:05:11 +08:00
|
|
|
}
|
|
|
|
|
2014-10-28 08:51:14 +08:00
|
|
|
// Destroy removes the container's resources and state, delegating to the
// current state object which enforces the legal lifecycle transitions.
func (c *linuxContainer) Destroy() error {
	c.m.Lock()
	defer c.m.Unlock()
	return c.state.destroy()
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) Pause() error {
|
2015-02-14 06:41:37 +08:00
|
|
|
c.m.Lock()
|
|
|
|
defer c.m.Unlock()
|
2016-01-22 08:43:33 +08:00
|
|
|
status, err := c.currentStatus()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-05-20 08:28:58 +08:00
|
|
|
switch status {
|
|
|
|
case Running, Created:
|
|
|
|
if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return c.state.transition(&pausedState{
|
|
|
|
c: c,
|
|
|
|
})
|
2015-10-03 02:16:50 +08:00
|
|
|
}
|
2016-09-20 10:49:04 +08:00
|
|
|
return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning)
|
2014-10-28 08:51:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) Resume() error {
|
2015-02-14 06:41:37 +08:00
|
|
|
c.m.Lock()
|
|
|
|
defer c.m.Unlock()
|
2016-01-22 08:43:33 +08:00
|
|
|
status, err := c.currentStatus()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if status != Paused {
|
|
|
|
return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused)
|
|
|
|
}
|
2015-10-03 02:16:50 +08:00
|
|
|
if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return c.state.transition(&runningState{
|
|
|
|
c: c,
|
|
|
|
})
|
2014-10-28 08:51:14 +08:00
|
|
|
}
|
|
|
|
|
2015-02-12 07:09:54 +08:00
|
|
|
func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
|
2016-04-26 00:19:39 +08:00
|
|
|
// XXX(cyphar): This requires cgroups.
|
|
|
|
if c.config.Rootless {
|
|
|
|
return nil, fmt.Errorf("cannot get OOM notifications from rootless container")
|
|
|
|
}
|
2015-02-12 09:12:03 +08:00
|
|
|
return notifyOnOOM(c.cgroupManager.GetPaths())
|
2015-02-01 13:21:06 +08:00
|
|
|
}
|
2015-02-12 08:45:23 +08:00
|
|
|
|
2015-12-08 23:33:47 +08:00
|
|
|
func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
|
2016-04-26 00:19:39 +08:00
|
|
|
// XXX(cyphar): This requires cgroups.
|
|
|
|
if c.config.Rootless {
|
|
|
|
return nil, fmt.Errorf("cannot get memory pressure notifications from rootless container")
|
|
|
|
}
|
2015-12-08 23:33:47 +08:00
|
|
|
return notifyMemoryPressure(c.cgroupManager.GetPaths(), level)
|
|
|
|
}
|
|
|
|
|
2017-03-15 04:21:58 +08:00
|
|
|
// criuFeatures receives the feature set reported by CRIU during a
// FEATURE_CHECK request; it is reset and read by checkCriuFeatures.
var criuFeatures *criurpc.CriuFeatures
|
|
|
|
|
|
|
|
func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error {
|
|
|
|
|
|
|
|
var t criurpc.CriuReqType
|
|
|
|
t = criurpc.CriuReqType_FEATURE_CHECK
|
|
|
|
|
2017-07-25 00:03:50 +08:00
|
|
|
// criu 1.8 => 10800
|
|
|
|
if err := c.checkCriuVersion(10800); err != nil {
|
2017-03-15 04:21:58 +08:00
|
|
|
// Feature checking was introduced with CRIU 1.8.
|
|
|
|
// Ignore the feature check if an older CRIU version is used
|
|
|
|
// and just act as before.
|
|
|
|
// As all automated PR testing is done using CRIU 1.7 this
|
|
|
|
// code will not be tested by automated PR testing.
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure the features we are looking for are really not from
|
|
|
|
// some previous check
|
|
|
|
criuFeatures = nil
|
|
|
|
|
|
|
|
req := &criurpc.CriuReq{
|
|
|
|
Type: &t,
|
|
|
|
// Theoretically this should not be necessary but CRIU
|
|
|
|
// segfaults if Opts is empty.
|
|
|
|
// Fixed in CRIU 2.12
|
|
|
|
Opts: rpcOpts,
|
|
|
|
Features: criuFeat,
|
|
|
|
}
|
|
|
|
|
|
|
|
err := c.criuSwrk(nil, req, criuOpts, false)
|
|
|
|
if err != nil {
|
|
|
|
logrus.Debugf("%s", err)
|
|
|
|
return fmt.Errorf("CRIU feature check failed")
|
|
|
|
}
|
|
|
|
|
|
|
|
logrus.Debugf("Feature check says: %s", criuFeatures)
|
|
|
|
missingFeatures := false
|
|
|
|
|
2017-07-28 16:44:45 +08:00
|
|
|
// The outer if checks if the fields actually exist
|
|
|
|
if (criuFeat.MemTrack != nil) &&
|
|
|
|
(criuFeatures.MemTrack != nil) {
|
|
|
|
// The inner if checks if they are set to true
|
|
|
|
if *criuFeat.MemTrack && !*criuFeatures.MemTrack {
|
|
|
|
missingFeatures = true
|
|
|
|
logrus.Debugf("CRIU does not support MemTrack")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This needs to be repeated for every new feature check.
|
|
|
|
// Is there a way to put this in a function. Reflection?
|
|
|
|
if (criuFeat.LazyPages != nil) &&
|
|
|
|
(criuFeatures.LazyPages != nil) {
|
|
|
|
if *criuFeat.LazyPages && !*criuFeatures.LazyPages {
|
|
|
|
missingFeatures = true
|
|
|
|
logrus.Debugf("CRIU does not support LazyPages")
|
|
|
|
}
|
2017-03-15 04:21:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if missingFeatures {
|
|
|
|
return fmt.Errorf("CRIU is missing features")
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-07-26 14:49:09 +08:00
|
|
|
// parseCriuVersion runs `criu -V` at the given path and parses the reported
// version into a single integer encoded as x*10000 + y*100 + z (so 1.5.2
// becomes 10502). Both release output ("Version: x.y[.z]") and git-build
// output (containing "GitID" and a "-" separator) are handled; a git build
// is treated as slightly newer than the base release (z or y is bumped).
func parseCriuVersion(path string) (int, error) {
	var x, y, z int

	out, err := exec.Command(path, "-V").Output()
	if err != nil {
		return 0, fmt.Errorf("Unable to execute CRIU command: %s", path)
	}

	x = 0
	y = 0
	z = 0
	if ep := strings.Index(string(out), "-"); ep >= 0 {
		// criu Git version format
		var version string
		if sp := strings.Index(string(out), "GitID"); sp > 0 {
			version = string(out)[sp:ep]
		} else {
			return 0, fmt.Errorf("Unable to parse the CRIU version: %s", path)
		}

		// Try the three-component form first; on failure fall back to two
		// components. The y++/z++ bump marks a git build as newer than the
		// corresponding release.
		n, err := fmt.Sscanf(version, "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2
		if err != nil {
			n, err = fmt.Sscanf(version, "GitID: v%d.%d", &x, &y) // 1.6
			y++
		} else {
			z++
		}
		// n and err now reflect whichever Sscanf ran last.
		if n < 2 || err != nil {
			return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err)
		}
	} else {
		// criu release version format
		n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2
		if err != nil {
			n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6
		}
		if n < 2 || err != nil {
			return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err)
		}
	}

	return x*10000 + y*100 + z, nil
}
|
2015-08-31 19:34:14 +08:00
|
|
|
|
2017-07-26 14:49:09 +08:00
|
|
|
// compareCriuVersion returns nil when criuVersion satisfies minVersion and a
// descriptive error otherwise. Versions are encoded as x*10000 + y*100 + z.
func compareCriuVersion(criuVersion int, minVersion int) error {
	if criuVersion >= minVersion {
		return nil
	}
	return fmt.Errorf("CRIU version %d must be %d or higher", criuVersion, minVersion)
}
|
|
|
|
|
2017-08-02 23:55:56 +08:00
|
|
|
// criuVersionRPC stores the result of the CRIU version RPC; it is reset to
// nil and then read back by checkCriuVersion around the VERSION request.
var criuVersionRPC *criurpc.CriuVersion
|
|
|
|
|
2017-07-26 14:49:09 +08:00
|
|
|
// checkCriuVersion checks that the CRIU version is greater than or equal to
// minVersion (encoded as x*10000 + y*100 + z). The detected version is cached
// in c.criuVersion so CRIU is only queried once per container object.
func (c *linuxContainer) checkCriuVersion(minVersion int) error {

	// If the version of criu has already been determined there is no need
	// to ask criu for the version again. Use the value from c.criuVersion.
	if c.criuVersion != 0 {
		return compareCriuVersion(c.criuVersion, minVersion)
	}

	// First try if this version of CRIU supports the version RPC.
	// The CRIU version RPC was introduced with CRIU 3.0.

	// First, reset the variable for the RPC answer to nil
	criuVersionRPC = nil

	var t criurpc.CriuReqType
	t = criurpc.CriuReqType_VERSION
	req := &criurpc.CriuReq{
		Type: &t,
	}

	err := c.criuSwrk(nil, req, nil, false)
	if err != nil {
		return fmt.Errorf("CRIU version check failed: %s", err)
	}

	if criuVersionRPC != nil {
		logrus.Debugf("CRIU version: %s", criuVersionRPC)
		// major and minor are always set
		c.criuVersion = int(*criuVersionRPC.Major) * 10000
		c.criuVersion += int(*criuVersionRPC.Minor) * 100
		if criuVersionRPC.Sublevel != nil {
			c.criuVersion += int(*criuVersionRPC.Sublevel)
		}
		if criuVersionRPC.Gitid != nil {
			// runc's convention is that a CRIU git release is
			// always the same as increasing the minor by 1
			c.criuVersion -= (c.criuVersion % 100)
			c.criuVersion += 100
		}
		return compareCriuVersion(c.criuVersion, minVersion)
	}

	// This is CRIU without the version RPC and therefore
	// older than 3.0. Parsing the output is required.

	// This can be removed once runc does not work with criu older than 3.0

	c.criuVersion, err = parseCriuVersion(c.criuPath)
	if err != nil {
		return err
	}

	return compareCriuVersion(c.criuVersion, minVersion)
}
|
|
|
|
|
2015-08-05 05:44:45 +08:00
|
|
|
// descriptorsFilename is the name of the JSON file in which process
// descriptor paths are stored.
const descriptorsFilename = "descriptors.json"
|
2015-05-05 02:25:43 +08:00
|
|
|
|
2015-07-21 02:25:22 +08:00
|
|
|
func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) {
|
|
|
|
mountDest := m.Destination
|
|
|
|
if strings.HasPrefix(mountDest, c.config.Rootfs) {
|
|
|
|
mountDest = mountDest[len(c.config.Rootfs):]
|
|
|
|
}
|
|
|
|
|
|
|
|
extMnt := &criurpc.ExtMountMap{
|
|
|
|
Key: proto.String(mountDest),
|
|
|
|
Val: proto.String(mountDest),
|
|
|
|
}
|
|
|
|
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
|
|
|
|
}
|
|
|
|
|
2016-10-13 09:15:18 +08:00
|
|
|
// addMaskPaths registers every masked file of the container as an external
// mount mapped to /dev/null in the CRIU dump request, so the masking
// bind-mounts survive checkpoint/restore. Paths that do not exist inside the
// container are skipped, as are directories (only files are masked with
// /dev/null).
func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error {
	for _, path := range c.config.MaskPaths {
		// Resolve the path through the init process's root to see what the
		// container actually sees.
		fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return err
		}
		if fi.IsDir() {
			continue
		}

		extMnt := &criurpc.ExtMountMap{
			Key: proto.String(path),
			Val: proto.String("/dev/null"),
		}
		req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
	}
	return nil
}
|
|
|
|
|
|
|
|
func waitForCriuLazyServer(r *os.File, status string) error {
|
|
|
|
|
|
|
|
data := make([]byte, 1)
|
|
|
|
_, err := r.Read(data)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
fd, err := os.OpenFile(status, os.O_TRUNC|os.O_WRONLY, os.ModeAppend)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = fd.Write(data)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
fd.Close()
|
2016-10-13 09:15:18 +08:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-04-19 09:28:40 +08:00
|
|
|
func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
|
2015-03-13 12:45:43 +08:00
|
|
|
c.m.Lock()
|
|
|
|
defer c.m.Unlock()
|
2015-04-01 23:15:00 +08:00
|
|
|
|
2016-04-23 21:39:42 +08:00
|
|
|
// TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has
|
|
|
|
// support for doing unprivileged dumps, but the setup of
|
|
|
|
// rootless containers might make this complicated.
|
|
|
|
if c.config.Rootless {
|
|
|
|
return fmt.Errorf("cannot checkpoint a rootless container")
|
|
|
|
}
|
|
|
|
|
2017-07-25 00:03:50 +08:00
|
|
|
// criu 1.5.2 => 10502
|
|
|
|
if err := c.checkCriuVersion(10502); err != nil {
|
2015-04-01 23:15:00 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2015-04-19 09:28:40 +08:00
|
|
|
if criuOpts.ImagesDirectory == "" {
|
2015-10-03 02:16:50 +08:00
|
|
|
return fmt.Errorf("invalid directory to save checkpoint")
|
2015-04-19 09:28:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Since a container can be C/R'ed multiple times,
|
|
|
|
// the checkpoint directory may already exist.
|
|
|
|
if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) {
|
2015-03-07 03:32:16 +08:00
|
|
|
return err
|
|
|
|
}
|
2015-04-10 23:19:14 +08:00
|
|
|
|
2015-04-19 09:28:40 +08:00
|
|
|
if criuOpts.WorkDirectory == "" {
|
|
|
|
criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
workDir, err := os.Open(criuOpts.WorkDirectory)
|
2015-04-10 23:19:14 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer workDir.Close()
|
|
|
|
|
2015-04-19 09:28:40 +08:00
|
|
|
imageDir, err := os.Open(criuOpts.ImagesDirectory)
|
2015-04-10 23:19:14 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer imageDir.Close()
|
2015-04-19 09:28:40 +08:00
|
|
|
|
|
|
|
rpcOpts := criurpc.CriuOpts{
|
2017-03-02 16:02:15 +08:00
|
|
|
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
|
|
|
|
WorkDirFd: proto.Int32(int32(workDir.Fd())),
|
|
|
|
LogLevel: proto.Int32(4),
|
|
|
|
LogFile: proto.String("dump.log"),
|
|
|
|
Root: proto.String(c.config.Rootfs),
|
|
|
|
ManageCgroups: proto.Bool(true),
|
|
|
|
NotifyScripts: proto.Bool(true),
|
|
|
|
Pid: proto.Int32(int32(c.initProcess.pid())),
|
|
|
|
ShellJob: proto.Bool(criuOpts.ShellJob),
|
|
|
|
LeaveRunning: proto.Bool(criuOpts.LeaveRunning),
|
|
|
|
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
|
|
|
|
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
|
|
|
|
FileLocks: proto.Bool(criuOpts.FileLocks),
|
|
|
|
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
|
|
|
|
OrphanPtsMaster: proto.Bool(true),
|
2017-08-18 06:31:49 +08:00
|
|
|
AutoDedup: proto.Bool(criuOpts.AutoDedup),
|
2017-07-24 23:43:14 +08:00
|
|
|
LazyPages: proto.Bool(criuOpts.LazyPages),
|
2015-04-19 09:28:40 +08:00
|
|
|
}
|
|
|
|
|
2017-03-23 05:41:12 +08:00
|
|
|
fcg := c.cgroupManager.GetPaths()["freezer"]
|
|
|
|
if fcg != "" {
|
|
|
|
rpcOpts.FreezeCgroup = proto.String(fcg)
|
2015-04-19 09:28:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-24 04:16:47 +08:00
|
|
|
// append optional criu opts, e.g., page-server and port
|
2015-04-24 23:13:15 +08:00
|
|
|
if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 {
|
2015-04-24 04:16:47 +08:00
|
|
|
rpcOpts.Ps = &criurpc.CriuPageServerInfo{
|
2015-04-24 23:13:15 +08:00
|
|
|
Address: proto.String(criuOpts.PageServer.Address),
|
|
|
|
Port: proto.Int32(criuOpts.PageServer.Port),
|
2015-04-24 04:16:47 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
//pre-dump may need parentImage param to complete iterative migration
|
|
|
|
if criuOpts.ParentImage != "" {
|
|
|
|
rpcOpts.ParentImg = proto.String(criuOpts.ParentImage)
|
|
|
|
rpcOpts.TrackMem = proto.Bool(true)
|
|
|
|
}
|
|
|
|
|
2015-08-06 23:14:59 +08:00
|
|
|
// append optional manage cgroups mode
|
|
|
|
if criuOpts.ManageCgroupsMode != 0 {
|
2017-07-25 00:03:50 +08:00
|
|
|
// criu 1.7 => 10700
|
|
|
|
if err := c.checkCriuVersion(10700); err != nil {
|
2015-08-06 23:14:59 +08:00
|
|
|
return err
|
|
|
|
}
|
2016-02-19 06:08:23 +08:00
|
|
|
mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
|
|
|
|
rpcOpts.ManageCgroupsMode = &mode
|
2015-08-06 23:14:59 +08:00
|
|
|
}
|
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
var t criurpc.CriuReqType
|
|
|
|
if criuOpts.PreDump {
|
2017-03-15 04:21:58 +08:00
|
|
|
feat := criurpc.CriuFeatures{
|
|
|
|
MemTrack: proto.Bool(true),
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
t = criurpc.CriuReqType_PRE_DUMP
|
|
|
|
} else {
|
|
|
|
t = criurpc.CriuReqType_DUMP
|
|
|
|
}
|
2015-07-21 02:25:22 +08:00
|
|
|
req := &criurpc.CriuReq{
|
2015-04-10 23:19:14 +08:00
|
|
|
Type: &t,
|
2015-04-19 09:28:40 +08:00
|
|
|
Opts: &rpcOpts,
|
2015-03-07 03:21:02 +08:00
|
|
|
}
|
2015-04-19 09:28:40 +08:00
|
|
|
|
2017-07-24 23:43:14 +08:00
|
|
|
if criuOpts.LazyPages {
|
|
|
|
// lazy migration requested; check if criu supports it
|
|
|
|
feat := criurpc.CriuFeatures{
|
|
|
|
LazyPages: proto.Bool(true),
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
statusRead, statusWrite, err := os.Pipe()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
rpcOpts.StatusFd = proto.Int32(int32(statusWrite.Fd()))
|
|
|
|
go waitForCriuLazyServer(statusRead, criuOpts.StatusFd)
|
|
|
|
}
|
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
//no need to dump these information in pre-dump
|
|
|
|
if !criuOpts.PreDump {
|
|
|
|
for _, m := range c.config.Mounts {
|
|
|
|
switch m.Device {
|
|
|
|
case "bind":
|
|
|
|
c.addCriuDumpMount(req, m)
|
|
|
|
case "cgroup":
|
|
|
|
binds, err := getCgroupMounts(m)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, b := range binds {
|
|
|
|
c.addCriuDumpMount(req, b)
|
|
|
|
}
|
2015-07-21 02:25:22 +08:00
|
|
|
}
|
2015-03-07 03:21:02 +08:00
|
|
|
}
|
2016-10-13 09:15:18 +08:00
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
if err := c.addMaskPaths(req); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-10-13 09:15:18 +08:00
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
for _, node := range c.config.Devices {
|
|
|
|
m := &configs.Mount{Destination: node.Path, Source: node.Path}
|
|
|
|
c.addCriuDumpMount(req, m)
|
|
|
|
}
|
2015-04-29 00:49:44 +08:00
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
// Write the FD info to a file in the image directory
|
|
|
|
fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-04-29 00:49:44 +08:00
|
|
|
|
2016-08-24 17:48:56 +08:00
|
|
|
err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-04-29 00:49:44 +08:00
|
|
|
}
|
|
|
|
|
2015-09-08 17:02:08 +08:00
|
|
|
err = c.criuSwrk(nil, req, criuOpts, false)
|
2015-04-10 20:48:28 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-03-13 12:45:43 +08:00
|
|
|
return nil
|
2015-03-07 03:21:02 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 02:25:22 +08:00
|
|
|
func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) {
|
|
|
|
mountDest := m.Destination
|
|
|
|
if strings.HasPrefix(mountDest, c.config.Rootfs) {
|
|
|
|
mountDest = mountDest[len(c.config.Rootfs):]
|
|
|
|
}
|
|
|
|
|
|
|
|
extMnt := &criurpc.ExtMountMap{
|
|
|
|
Key: proto.String(mountDest),
|
|
|
|
Val: proto.String(m.Source),
|
|
|
|
}
|
|
|
|
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
|
|
|
|
}
|
|
|
|
|
2016-03-29 05:41:50 +08:00
|
|
|
func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) {
|
|
|
|
for _, iface := range c.config.Networks {
|
|
|
|
switch iface.Type {
|
|
|
|
case "veth":
|
|
|
|
veth := new(criurpc.CriuVethPair)
|
|
|
|
veth.IfOut = proto.String(iface.HostInterfaceName)
|
|
|
|
veth.IfIn = proto.String(iface.Name)
|
|
|
|
req.Opts.Veths = append(req.Opts.Veths, veth)
|
|
|
|
case "loopback":
|
2017-07-28 21:02:56 +08:00
|
|
|
// Do nothing
|
2016-03-29 05:41:50 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, i := range criuOpts.VethPairs {
|
|
|
|
veth := new(criurpc.CriuVethPair)
|
|
|
|
veth.IfOut = proto.String(i.HostInterfaceName)
|
|
|
|
veth.IfIn = proto.String(i.ContainerInterfaceName)
|
|
|
|
req.Opts.Veths = append(req.Opts.Veths, veth)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-19 09:28:40 +08:00
|
|
|
// Restore restores the container from a CRIU checkpoint image. It prepares
// the work/image directories, bind-mounts the rootfs onto a private mount
// point (a CRIU requirement), builds the RESTORE request with external
// mounts, veth pairs and inherited file descriptors, and hands it to the
// CRIU service via criuSwrk.
//
// The restored init process becomes a sibling of the caller (RstSibling),
// so criuSwrk is called with applyCgroups=true to place it in the
// container's cgroups.
func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
	c.m.Lock()
	defer c.m.Unlock()

	// TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have
	// support for unprivileged restore at the moment.
	if c.config.Rootless {
		return fmt.Errorf("cannot restore a rootless container")
	}

	// criu 1.5.2 => 10502
	if err := c.checkCriuVersion(10502); err != nil {
		return err
	}
	if criuOpts.WorkDirectory == "" {
		criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
	}
	// Since a container can be C/R'ed multiple times,
	// the work directory may already exist.
	// NOTE(review): mode 0655 omits the owner execute bit, which is unusual
	// for a directory — 0755 was probably intended; confirm before changing.
	if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) {
		return err
	}
	// The work and image directories are handed to CRIU as open fds.
	workDir, err := os.Open(criuOpts.WorkDirectory)
	if err != nil {
		return err
	}
	defer workDir.Close()
	if criuOpts.ImagesDirectory == "" {
		return fmt.Errorf("invalid directory to restore checkpoint")
	}
	imageDir, err := os.Open(criuOpts.ImagesDirectory)
	if err != nil {
		return err
	}
	defer imageDir.Close()
	// CRIU has a few requirements for a root directory:
	// * it must be a mount point
	// * its parent must not be overmounted
	// c.config.Rootfs is bind-mounted to a temporary directory
	// to satisfy these requirements.
	root := filepath.Join(c.root, "criu-root")
	if err := os.Mkdir(root, 0755); err != nil {
		return err
	}
	defer os.Remove(root)
	// Resolve symlinks so CRIU sees the canonical path.
	root, err = filepath.EvalSymlinks(root)
	if err != nil {
		return err
	}
	err = unix.Mount(c.config.Rootfs, root, "", unix.MS_BIND|unix.MS_REC, "")
	if err != nil {
		return err
	}
	// Lazy detach so the unmount succeeds even if something below is busy.
	defer unix.Unmount(root, unix.MNT_DETACH)
	t := criurpc.CriuReqType_RESTORE
	req := &criurpc.CriuReq{
		Type: &t,
		Opts: &criurpc.CriuOpts{
			ImagesDirFd:     proto.Int32(int32(imageDir.Fd())),
			WorkDirFd:       proto.Int32(int32(workDir.Fd())),
			EvasiveDevices:  proto.Bool(true),
			LogLevel:        proto.Int32(4),
			LogFile:         proto.String("restore.log"),
			RstSibling:      proto.Bool(true),
			Root:            proto.String(root),
			ManageCgroups:   proto.Bool(true),
			NotifyScripts:   proto.Bool(true),
			ShellJob:        proto.Bool(criuOpts.ShellJob),
			ExtUnixSk:       proto.Bool(criuOpts.ExternalUnixConnections),
			TcpEstablished:  proto.Bool(criuOpts.TcpEstablished),
			FileLocks:       proto.Bool(criuOpts.FileLocks),
			EmptyNs:         proto.Uint32(criuOpts.EmptyNs),
			OrphanPtsMaster: proto.Bool(true),
			AutoDedup:       proto.Bool(criuOpts.AutoDedup),
			LazyPages:       proto.Bool(criuOpts.LazyPages),
		},
	}

	// Re-register the same external mounts that were declared at dump time.
	for _, m := range c.config.Mounts {
		switch m.Device {
		case "bind":
			c.addCriuRestoreMount(req, m)
		case "cgroup":
			binds, err := getCgroupMounts(m)
			if err != nil {
				return err
			}
			for _, b := range binds {
				c.addCriuRestoreMount(req, b)
			}
		}
	}

	// Masked paths are realized as /dev/null bind mounts; map them back.
	if len(c.config.MaskPaths) > 0 {
		m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"}
		c.addCriuRestoreMount(req, m)
	}

	// Device nodes were dumped as external bind mounts of themselves.
	for _, node := range c.config.Devices {
		m := &configs.Mount{Destination: node.Path, Source: node.Path}
		c.addCriuRestoreMount(req, m)
	}

	// Only re-create veth pairs when the network namespace is not excluded.
	if criuOpts.EmptyNs&unix.CLONE_NEWNET == 0 {
		c.restoreNetwork(req, criuOpts)
	}

	// append optional manage cgroups mode
	if criuOpts.ManageCgroupsMode != 0 {
		// criu 1.7 => 10700
		if err := c.checkCriuVersion(10700); err != nil {
			return err
		}
		mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
		req.Opts.ManageCgroupsMode = &mode
	}

	var (
		fds    []string
		fdJSON []byte
	)
	// The descriptors file was written by Checkpoint; it records the init
	// process's external fds by index.
	if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil {
		return err
	}

	if err := json.Unmarshal(fdJSON, &fds); err != nil {
		return err
	}
	// Pipes that were proxied at dump time are re-attached by fd index.
	for i := range fds {
		if s := fds[i]; strings.Contains(s, "pipe:") {
			inheritFd := new(criurpc.InheritFd)
			inheritFd.Key = proto.String(s)
			inheritFd.Fd = proto.Int32(int32(i))
			req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
		}
	}
	return c.criuSwrk(process, req, criuOpts, true)
}
|
|
|
|
|
2015-09-08 17:02:08 +08:00
|
|
|
func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
|
2016-04-23 21:39:42 +08:00
|
|
|
// XXX: Do we need to deal with this case? AFAIK criu still requires root.
|
2015-09-08 17:02:08 +08:00
|
|
|
if err := c.cgroupManager.Apply(pid); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-04-07 07:34:41 +08:00
|
|
|
if err := c.cgroupManager.Set(c.config); err != nil {
|
|
|
|
return newSystemError(err)
|
|
|
|
}
|
|
|
|
|
2015-09-08 17:02:08 +08:00
|
|
|
path := fmt.Sprintf("/proc/%d/cgroup", pid)
|
|
|
|
cgroupsPaths, err := cgroups.ParseCgroupFile(path)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for c, p := range cgroupsPaths {
|
|
|
|
cgroupRoot := &criurpc.CgroupRoot{
|
|
|
|
Ctrl: proto.String(c),
|
|
|
|
Path: proto.String(p),
|
|
|
|
}
|
|
|
|
req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// criuSwrk drives one CRIU "swrk" (service worker) invocation over a
// SEQPACKET socketpair: it starts CRIU with the transport socket passed as
// fd 3, sends req, and processes responses — answering NOTIFY messages in a
// loop — until the final status for the request type arrives.
//
// opts may be nil (VERSION RPC). When applyCgroups is true the CRIU process
// is moved into the container's cgroups before the request is sent (restore
// path, so the restored sibling inherits them).
func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool) error {
	fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
	if err != nil {
		return err
	}

	var logPath string
	if opts != nil {
		logPath = filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile())
	} else {
		// For the VERSION RPC 'opts' is set to 'nil' and therefore
		// opts.WorkDirectory does not exist. Set logPath to "".
		logPath = ""
	}
	// Wrap our end of the socketpair in a UnixConn; the *os.File wrapper can
	// be closed immediately because FileConn dups the descriptor.
	criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client")
	criuClientFileCon, err := net.FileConn(criuClient)
	criuClient.Close()
	if err != nil {
		return err
	}

	criuClientCon := criuClientFileCon.(*net.UnixConn)
	defer criuClientCon.Close()

	criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server")
	defer criuServer.Close()

	// "swrk 3": CRIU service-worker mode, talking on fd 3 (the first
	// ExtraFiles entry after stdin/stdout/stderr).
	args := []string{"swrk", "3"}
	if c.criuVersion != 0 {
		// If the CRIU Version is still '0' then this is probably
		// the initial CRIU run to detect the version. Skip it.
		logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath)
	}
	logrus.Debugf("Using CRIU with following args: %s", args)
	cmd := exec.Command(c.criuPath, args...)
	if process != nil {
		cmd.Stdin = process.Stdin
		cmd.Stdout = process.Stdout
		cmd.Stderr = process.Stderr
	}
	cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer)

	if err := cmd.Start(); err != nil {
		return err
	}
	// The parent's copy of the server end is no longer needed once CRIU has it.
	criuServer.Close()

	// Best-effort cleanup on early return: close the transport and reap CRIU.
	defer func() {
		criuClientCon.Close()
		_, err := cmd.Process.Wait()
		if err != nil {
			return
		}
	}()

	if applyCgroups {
		err := c.criuApplyCgroups(cmd.Process.Pid, req)
		if err != nil {
			return err
		}
	}

	var extFds []string
	if process != nil {
		extFds, err = getPipeFds(cmd.Process.Pid)
		if err != nil {
			return err
		}
	}

	logrus.Debugf("Using CRIU in %s mode", req.GetType().String())
	// In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts()
	// should be empty. For older CRIU versions it still will be
	// available but empty. criurpc.CriuReqType_VERSION actually
	// has no req.GetOpts().
	if !(req.GetType() == criurpc.CriuReqType_FEATURE_CHECK ||
		req.GetType() == criurpc.CriuReqType_VERSION) {

		// Debug-log every non-internal option field via reflection.
		val := reflect.ValueOf(req.GetOpts())
		v := reflect.Indirect(val)
		for i := 0; i < v.NumField(); i++ {
			st := v.Type()
			name := st.Field(i).Name
			if strings.HasPrefix(name, "XXX_") {
				continue
			}
			value := val.MethodByName("Get" + name).Call([]reflect.Value{})
			logrus.Debugf("CRIU option %s with value %v", name, value[0])
		}
	}
	data, err := proto.Marshal(req)
	if err != nil {
		return err
	}
	_, err = criuClientCon.Write(data)
	if err != nil {
		return err
	}

	buf := make([]byte, 10*4096)
	oob := make([]byte, 4096)
	// NOTE(review): "for true" is non-idiomatic Go; "for {" is equivalent.
	for true {
		// Out-of-band data may carry SCM_RIGHTS fds (e.g. orphan-pts-master).
		n, oobn, _, _, err := criuClientCon.ReadMsgUnix(buf, oob)
		if err != nil {
			return err
		}
		if n == 0 {
			return fmt.Errorf("unexpected EOF")
		}
		if n == len(buf) {
			return fmt.Errorf("buffer is too small")
		}

		resp := new(criurpc.CriuResp)
		err = proto.Unmarshal(buf[:n], resp)
		if err != nil {
			return err
		}
		if !resp.GetSuccess() {
			typeString := req.GetType().String()
			if typeString == "VERSION" {
				// If the VERSION RPC fails this probably means that the CRIU
				// version is too old for this RPC. Just return 'nil'.
				return nil
			}
			return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath)
		}

		t := resp.GetType()
		switch {
		case t == criurpc.CriuReqType_VERSION:
			// Stash the parsed version for checkCriuVersion (package-level var).
			logrus.Debugf("CRIU version: %s", resp)
			criuVersionRPC = resp.GetVersion()
			break
		case t == criurpc.CriuReqType_FEATURE_CHECK:
			logrus.Debugf("Feature check says: %s", resp)
			criuFeatures = resp.GetFeatures()
		case t == criurpc.CriuReqType_NOTIFY:
			// Handle the notification, then acknowledge it so CRIU proceeds.
			if err := c.criuNotifications(resp, process, opts, extFds, oob[:oobn]); err != nil {
				return err
			}
			t = criurpc.CriuReqType_NOTIFY
			req = &criurpc.CriuReq{
				Type:          &t,
				NotifySuccess: proto.Bool(true),
			}
			data, err = proto.Marshal(req)
			if err != nil {
				return err
			}
			_, err = criuClientCon.Write(data)
			if err != nil {
				return err
			}
			continue
		case t == criurpc.CriuReqType_RESTORE:
		case t == criurpc.CriuReqType_DUMP:
		case t == criurpc.CriuReqType_PRE_DUMP:
		default:
			return fmt.Errorf("unable to parse the response %s", resp.String())
		}

		break
	}

	// Half-close our side so CRIU sees EOF and exits its service loop.
	criuClientCon.CloseWrite()
	// cmd.Wait() waits cmd.goroutines which are used for proxying file descriptors.
	// Here we want to wait only the CRIU process.
	st, err := cmd.Process.Wait()
	if err != nil {
		return err
	}

	// In pre-dump mode CRIU is in a loop and waits for
	// the final DUMP command.
	// The current runc pre-dump approach, however, is
	// start criu in PRE_DUMP once for a single pre-dump
	// and not the whole series of pre-dump, pre-dump, ...m, dump
	// If we got the message CriuReqType_PRE_DUMP it means
	// CRIU was successful and we need to forcefully stop CRIU
	if !st.Success() && *req.Type != criurpc.CriuReqType_PRE_DUMP {
		return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath)
	}
	return nil
}
|
|
|
|
|
2015-04-20 16:24:50 +08:00
|
|
|
// block any external network activity
|
|
|
|
func lockNetwork(config *configs.Config) error {
|
|
|
|
for _, config := range config.Networks {
|
|
|
|
strategy, err := getStrategy(config.Type)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := strategy.detach(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func unlockNetwork(config *configs.Config) error {
|
|
|
|
for _, config := range config.Networks {
|
|
|
|
strategy, err := getStrategy(config.Type)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err = strategy.attach(config); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-02 16:02:15 +08:00
|
|
|
// criuNotifications dispatches a single CRIU NOTIFY message by its script
// name, performing the runc-side action for each checkpoint/restore phase:
// marker-file bookkeeping ("post-dump"), network lock/unlock, prestart hook
// execution ("setup-namespaces"), state transition and persistence
// ("post-restore"), and console fd forwarding ("orphan-pts-master").
//
// fds lists the external descriptors of the restored process; oob carries
// the out-of-band (SCM_RIGHTS) bytes received with the notification.
// NOTE(review): process/opts are presumably non-nil only on the restore
// path — the "post-restore" and "orphan-pts-master" cases dereference them
// without nil checks; confirm against criuSwrk's callers.
func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string, oob []byte) error {
	notify := resp.GetNotify()
	if notify == nil {
		return fmt.Errorf("invalid response: %s", resp.String())
	}
	logrus.Debugf("notify: %s\n", notify.GetScript())
	switch {
	case notify.GetScript() == "post-dump":
		// Drop a "checkpoint" marker file; removed again on post-restore.
		f, err := os.Create(filepath.Join(c.root, "checkpoint"))
		if err != nil {
			return err
		}
		f.Close()
	case notify.GetScript() == "network-unlock":
		if err := unlockNetwork(c.config); err != nil {
			return err
		}
	case notify.GetScript() == "network-lock":
		if err := lockNetwork(c.config); err != nil {
			return err
		}
	case notify.GetScript() == "setup-namespaces":
		// Namespaces exist now; run the configured prestart hooks against
		// the restored init pid.
		if c.config.Hooks != nil {
			bundle, annotations := utils.Annotations(c.config.Labels)
			s := configs.HookState{
				Version:     c.config.Version,
				ID:          c.id,
				Pid:         int(notify.GetPid()),
				Bundle:      bundle,
				Annotations: annotations,
			}
			for i, hook := range c.config.Hooks.Prestart {
				if err := hook.Run(s); err != nil {
					return newSystemErrorWithCausef(err, "running prestart hook %d", i)
				}
			}
		}
	case notify.GetScript() == "post-restore":
		// Adopt the restored process and persist the new container state.
		pid := notify.GetPid()
		r, err := newRestoredProcess(int(pid), fds)
		if err != nil {
			return err
		}
		process.ops = r
		if err := c.state.transition(&restoredState{
			imageDir: opts.ImagesDirectory,
			c:        c,
		}); err != nil {
			return err
		}
		// create a timestamp indicating when the restored checkpoint was started
		c.created = time.Now().UTC()
		if _, err := c.updateState(r); err != nil {
			return err
		}
		// Remove the "checkpoint" marker left by post-dump; absence is fine.
		if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil {
			if !os.IsNotExist(err) {
				logrus.Error(err)
			}
		}
	case notify.GetScript() == "orphan-pts-master":
		// CRIU passed the orphaned pty master via SCM_RIGHTS in oob.
		scm, err := unix.ParseSocketControlMessage(oob)
		if err != nil {
			return err
		}
		// Note: this local fds shadows the []string parameter of the same name.
		fds, err := unix.ParseUnixRights(&scm[0])
		if err != nil {
			return err
		}

		master := os.NewFile(uintptr(fds[0]), "orphan-pts-master")
		defer master.Close()

		// While we can access console.master, using the API is a good idea.
		if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil {
			return err
		}
	}
	return nil
}
|
|
|
|
|
2016-07-05 08:24:13 +08:00
|
|
|
func (c *linuxContainer) updateState(process parentProcess) (*State, error) {
|
2017-08-15 14:30:58 +08:00
|
|
|
if process != nil {
|
|
|
|
c.initProcess = process
|
|
|
|
}
|
2015-02-14 06:41:37 +08:00
|
|
|
state, err := c.currentState()
|
2015-02-12 08:45:23 +08:00
|
|
|
if err != nil {
|
2016-07-05 08:24:13 +08:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
err = c.saveState(state)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2015-02-12 08:45:23 +08:00
|
|
|
}
|
2016-07-05 08:24:13 +08:00
|
|
|
return state, nil
|
2015-10-03 02:16:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) saveState(s *State) error {
|
2015-02-12 08:45:23 +08:00
|
|
|
f, err := os.Create(filepath.Join(c.root, stateFilename))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer f.Close()
|
2016-01-26 10:15:44 +08:00
|
|
|
return utils.WriteJSON(f, s)
|
2015-10-03 02:16:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) deleteState() error {
|
|
|
|
return os.Remove(filepath.Join(c.root, stateFilename))
|
2015-02-12 08:45:23 +08:00
|
|
|
}
|
2015-02-14 06:41:37 +08:00
|
|
|
|
|
|
|
func (c *linuxContainer) currentStatus() (Status, error) {
|
2015-10-03 02:16:50 +08:00
|
|
|
if err := c.refreshState(); err != nil {
|
|
|
|
return -1, err
|
|
|
|
}
|
|
|
|
return c.state.status(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// refreshState needs to be called to verify that the current state on the
|
|
|
|
// container is what is true. Because consumers of libcontainer can use it
|
|
|
|
// out of process we need to verify the container's status based on runtime
|
|
|
|
// information and not rely on our in process info.
|
|
|
|
func (c *linuxContainer) refreshState() error {
|
|
|
|
paused, err := c.isPaused()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if paused {
|
|
|
|
return c.state.transition(&pausedState{c: c})
|
2015-04-10 20:47:37 +08:00
|
|
|
}
|
2016-05-14 07:54:16 +08:00
|
|
|
t, err := c.runType()
|
2015-10-03 02:16:50 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-05-14 07:54:16 +08:00
|
|
|
switch t {
|
2016-05-14 08:01:12 +08:00
|
|
|
case Created:
|
|
|
|
return c.state.transition(&createdState{c: c})
|
2016-05-14 07:54:16 +08:00
|
|
|
case Running:
|
2015-10-03 02:16:50 +08:00
|
|
|
return c.state.transition(&runningState{c: c})
|
|
|
|
}
|
|
|
|
return c.state.transition(&stoppedState{c: c})
|
|
|
|
}
|
|
|
|
|
2016-05-14 07:54:16 +08:00
|
|
|
func (c *linuxContainer) runType() (Status, error) {
|
2015-02-14 06:41:37 +08:00
|
|
|
if c.initProcess == nil {
|
2016-05-14 07:54:16 +08:00
|
|
|
return Stopped, nil
|
2015-02-14 06:41:37 +08:00
|
|
|
}
|
2016-05-14 07:54:16 +08:00
|
|
|
pid := c.initProcess.pid()
|
2017-06-15 07:41:16 +08:00
|
|
|
stat, err := system.Stat(pid)
|
|
|
|
if err != nil {
|
|
|
|
return Stopped, nil
|
2016-05-14 07:54:16 +08:00
|
|
|
}
|
2017-06-15 07:41:16 +08:00
|
|
|
if stat.StartTime != c.initProcessStartTime || stat.State == system.Zombie || stat.State == system.Dead {
|
|
|
|
return Stopped, nil
|
2016-07-05 08:24:13 +08:00
|
|
|
}
|
2017-02-22 10:16:19 +08:00
|
|
|
// We'll create exec fifo and blocking on it after container is created,
|
|
|
|
// and delete it after start container.
|
|
|
|
if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil {
|
2016-05-26 02:24:26 +08:00
|
|
|
return Created, nil
|
2015-02-14 06:41:37 +08:00
|
|
|
}
|
2016-05-14 07:54:16 +08:00
|
|
|
return Running, nil
|
2015-10-03 02:16:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) isPaused() (bool, error) {
|
2017-03-23 06:43:39 +08:00
|
|
|
fcg := c.cgroupManager.GetPaths()["freezer"]
|
|
|
|
if fcg == "" {
|
|
|
|
// A container doesn't have a freezer cgroup
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
data, err := ioutil.ReadFile(filepath.Join(fcg, "freezer.state"))
|
2015-10-03 02:16:50 +08:00
|
|
|
if err != nil {
|
2016-05-16 18:24:07 +08:00
|
|
|
// If freezer cgroup is not mounted, the container would just be not paused.
|
2015-10-03 02:16:50 +08:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return false, nil
|
|
|
|
}
|
2016-04-19 02:37:26 +08:00
|
|
|
return false, newSystemErrorWithCause(err, "checking if container is paused")
|
2015-02-14 06:41:37 +08:00
|
|
|
}
|
2015-10-03 02:16:50 +08:00
|
|
|
return bytes.Equal(bytes.TrimSpace(data), []byte("FROZEN")), nil
|
2015-02-14 06:41:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *linuxContainer) currentState() (*State, error) {
|
2016-01-22 08:43:33 +08:00
|
|
|
var (
|
2017-06-15 06:38:45 +08:00
|
|
|
startTime uint64
|
2016-01-22 08:43:33 +08:00
|
|
|
externalDescriptors []string
|
|
|
|
pid = -1
|
|
|
|
)
|
|
|
|
if c.initProcess != nil {
|
|
|
|
pid = c.initProcess.pid()
|
|
|
|
startTime, _ = c.initProcess.startTime()
|
|
|
|
externalDescriptors = c.initProcess.externalDescriptors()
|
2015-02-14 06:41:37 +08:00
|
|
|
}
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
Is in root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
intelRdtPath, err := intelrdt.GetIntelRdtPath(c.ID())
|
|
|
|
if err != nil {
|
|
|
|
intelRdtPath = ""
|
|
|
|
}
|
2015-02-14 06:41:37 +08:00
|
|
|
state := &State{
|
2015-10-24 00:22:48 +08:00
|
|
|
BaseState: BaseState{
|
|
|
|
ID: c.ID(),
|
|
|
|
Config: *c.config,
|
2016-01-22 08:43:33 +08:00
|
|
|
InitProcessPid: pid,
|
2015-10-24 00:22:48 +08:00
|
|
|
InitProcessStartTime: startTime,
|
2016-01-29 05:32:24 +08:00
|
|
|
Created: c.created,
|
2015-10-24 00:22:48 +08:00
|
|
|
},
|
2016-04-23 21:39:42 +08:00
|
|
|
Rootless: c.config.Rootless,
|
2015-10-24 00:22:48 +08:00
|
|
|
CgroupPaths: c.cgroupManager.GetPaths(),
|
libcontainer: add support for Intel RDT/CAT in runc
About Intel RDT/CAT feature:
Intel platforms with new Xeon CPU support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
For more information about Intel RDT/CAT can be found in the section 17.17
of Intel Software Developer Manual.
About Intel RDT/CAT kernel interface:
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Comparing with cgroups, it has similar process management lifecycle and
interfaces in a container. But unlike cgroups' hierarchy, it has single level
filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belongs to this group (e.g.,
<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
Is in root group.
The file `schemata` has allocation bitmasks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
The valid L3 cache CBM is a *contiguous bits set* and number of bits that can
be set is less than the max bit. The max bits in the CBM is varied among
supported Intel Xeon platforms. In Intel RDT "resource control" filesystem
layout, the CBM in a group should be a subset of the CBM in root. Kernel will
check if it is valid when writing. e.g., 0xfffff in root indicates the max bits
of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
2017-08-30 19:34:26 +08:00
|
|
|
IntelRdtPath: intelRdtPath,
|
2015-10-24 00:22:48 +08:00
|
|
|
NamespacePaths: make(map[configs.NamespaceType]string),
|
2016-01-22 08:43:33 +08:00
|
|
|
ExternalDescriptors: externalDescriptors,
|
2015-02-14 06:41:37 +08:00
|
|
|
}
|
2016-01-22 08:43:33 +08:00
|
|
|
if pid > 0 {
|
|
|
|
for _, ns := range c.config.Namespaces {
|
|
|
|
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
|
|
|
|
}
|
|
|
|
for _, nsType := range configs.NamespaceTypes() {
|
2016-03-02 09:59:26 +08:00
|
|
|
if !configs.IsNamespaceSupported(nsType) {
|
|
|
|
continue
|
|
|
|
}
|
2016-01-22 08:43:33 +08:00
|
|
|
if _, ok := state.NamespacePaths[nsType]; !ok {
|
|
|
|
ns := configs.Namespace{Type: nsType}
|
|
|
|
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
|
|
|
|
}
|
2015-04-08 05:16:29 +08:00
|
|
|
}
|
|
|
|
}
|
2015-02-14 06:41:37 +08:00
|
|
|
return state, nil
|
|
|
|
}
|
2015-10-17 23:35:36 +08:00
|
|
|
|
2015-09-14 08:37:56 +08:00
|
|
|
// orderNamespacePaths sorts namespace paths into a list of paths that we
|
|
|
|
// can setns in order.
|
|
|
|
func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) {
|
|
|
|
paths := []string{}
|
2016-07-18 22:40:24 +08:00
|
|
|
|
2017-04-28 12:42:56 +08:00
|
|
|
for _, ns := range configs.NamespaceTypes() {
|
2017-04-25 18:26:40 +08:00
|
|
|
|
|
|
|
// Remove namespaces that we don't need to join.
|
|
|
|
if !c.config.Namespaces.Contains(ns) {
|
|
|
|
continue
|
2016-07-18 22:40:24 +08:00
|
|
|
}
|
2017-04-25 18:26:40 +08:00
|
|
|
|
|
|
|
if p, ok := namespaces[ns]; ok && p != "" {
|
2015-09-14 08:37:56 +08:00
|
|
|
// check if the requested namespace is supported
|
2017-04-25 18:26:40 +08:00
|
|
|
if !configs.IsNamespaceSupported(ns) {
|
|
|
|
return nil, newSystemError(fmt.Errorf("namespace %s is not supported", ns))
|
2015-09-14 08:37:56 +08:00
|
|
|
}
|
|
|
|
// only set to join this namespace if it exists
|
|
|
|
if _, err := os.Lstat(p); err != nil {
|
2016-04-19 02:37:26 +08:00
|
|
|
return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p)
|
2015-09-14 08:37:56 +08:00
|
|
|
}
|
|
|
|
// do not allow namespace path with comma as we use it to separate
|
|
|
|
// the namespace paths
|
|
|
|
if strings.ContainsRune(p, ',') {
|
|
|
|
return nil, newSystemError(fmt.Errorf("invalid path %s", p))
|
|
|
|
}
|
2017-04-25 18:26:40 +08:00
|
|
|
paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(ns), p))
|
2015-09-14 08:37:56 +08:00
|
|
|
}
|
2017-04-25 18:26:40 +08:00
|
|
|
|
2015-09-14 08:37:56 +08:00
|
|
|
}
|
2017-04-25 18:26:40 +08:00
|
|
|
|
2015-09-14 08:37:56 +08:00
|
|
|
return paths, nil
|
|
|
|
}
|
2015-09-14 08:40:43 +08:00
|
|
|
|
|
|
|
func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) {
|
|
|
|
data := bytes.NewBuffer(nil)
|
|
|
|
for _, im := range idMap {
|
|
|
|
line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size)
|
|
|
|
if _, err := data.WriteString(line); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return data.Bytes(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// bootstrapData encodes the necessary data in netlink binary format
// as a io.Reader.
// Consumer can write the data to a bootstrap program
// such as one that uses nsenter package to bootstrap the container's
// init process correctly, i.e. with correct namespaces, uid/gid
// mapping etc.
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) {
	// create the netlink message
	r := nl.NewNetlinkRequest(int(InitMsg), 0)

	// write cloneFlags
	r.AddData(&Int32msg{
		Type:  CloneFlagsAttr,
		Value: uint32(cloneFlags),
	})

	// write custom namespace paths, serialized as a comma-separated
	// "name:path" list (orderNamespacePaths rejects paths containing commas)
	if len(nsMaps) > 0 {
		nsPaths, err := c.orderNamespacePaths(nsMaps)
		if err != nil {
			return nil, err
		}
		r.AddData(&Bytemsg{
			Type:  NsPathsAttr,
			Value: []byte(strings.Join(nsPaths, ",")),
		})
	}

	// write uid/gid mappings only when we are not joining an existing user
	// namespace — when joining one, its mappings are already established
	_, joinExistingUser := nsMaps[configs.NEWUSER]
	if !joinExistingUser {
		// write uid mappings
		if len(c.config.UidMappings) > 0 {
			// rootless containers delegate to the setuid newuidmap helper
			// when one was located, since they lack CAP_SETUID themselves
			if c.config.Rootless && c.newuidmapPath != "" {
				r.AddData(&Bytemsg{
					Type:  UidmapPathAttr,
					Value: []byte(c.newuidmapPath),
				})
			}
			b, err := encodeIDMapping(c.config.UidMappings)
			if err != nil {
				return nil, err
			}
			r.AddData(&Bytemsg{
				Type:  UidmapAttr,
				Value: b,
			})
		}

		// write gid mappings
		if len(c.config.GidMappings) > 0 {
			b, err := encodeIDMapping(c.config.GidMappings)
			if err != nil {
				return nil, err
			}
			r.AddData(&Bytemsg{
				Type:  GidmapAttr,
				Value: b,
			})
			// as with uids, rootless containers may delegate to newgidmap
			if c.config.Rootless && c.newgidmapPath != "" {
				r.AddData(&Bytemsg{
					Type:  GidmapPathAttr,
					Value: []byte(c.newgidmapPath),
				})
			}
			// The following only applies if we are root.
			if !c.config.Rootless {
				// check if we have CAP_SETGID to setgroup properly
				pid, err := capability.NewPid(0)
				if err != nil {
					return nil, err
				}
				if !pid.Get(capability.EFFECTIVE, capability.CAP_SETGID) {
					r.AddData(&Boolmsg{
						Type:  SetgroupAttr,
						Value: true,
					})
				}
			}
		}
	}

	// write oom_score_adj (sent as its decimal string representation)
	r.AddData(&Bytemsg{
		Type:  OomScoreAdjAttr,
		Value: []byte(fmt.Sprintf("%d", c.config.OomScoreAdj)),
	})

	// write rootless
	r.AddData(&Boolmsg{
		Type:  RootlessAttr,
		Value: c.config.Rootless,
	})

	return bytes.NewReader(r.Serialize()), nil
}
|
2017-01-25 07:24:05 +08:00
|
|
|
|
|
|
|
// ignoreTerminateErrors returns nil if the given err matches an error known
|
|
|
|
// to indicate that the terminate occurred successfully or err was nil, otherwise
|
|
|
|
// err is returned unaltered.
|
|
|
|
func ignoreTerminateErrors(err error) error {
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2017-10-11 03:36:19 +08:00
|
|
|
s := err.Error()
|
|
|
|
switch {
|
|
|
|
case strings.Contains(s, "process already finished"), strings.Contains(s, "Wait was already called"):
|
2017-01-25 07:24:05 +08:00
|
|
|
return nil
|
|
|
|
}
|
2017-10-11 03:36:19 +08:00
|
|
|
return err
|
2017-01-25 07:24:05 +08:00
|
|
|
}
|