add dockerfile and update docs

This commit is contained in:
lidj 2023-09-03 15:11:55 +08:00
parent 72e3b9b512
commit fab216f6bd
17 changed files with 809 additions and 662 deletions

53
Dockerfile Normal file
View File

@ -0,0 +1,53 @@
# Build environment for TreeSLS/ChCore: toolchain, musl cross-compiler,
# and the libraries needed by the sample applications.
FROM ubuntu:20.04
# Prevent apt from prompting for tzdata etc. during image build.
ENV DEBIAN_FRONTEND=noninteractive
# install required packages
RUN apt-get update && \
    apt-get install -y cmake \
    clang \
    automake \
    autoconf \
    autotools-dev \
    libtool \
    python \
    g++ \
    git \
    wget \
    pkg-config \
    cpio \
    grub2 \
    xorriso
# install elf tools
COPY ./scripts/read_procmgr_elf_tool /home/read_procmgr_elf_tool
WORKDIR /home/read_procmgr_elf_tool
RUN gcc elf.c main.c -o read_procmgr_elf_tool && \
    mkdir -p /usr/bin/ && \
    cp read_procmgr_elf_tool /usr/bin/
# copy user dir
COPY ./user /chos/user
# install musl-cross-make
WORKDIR /home
RUN git clone https://ghproxy.com//https://github.com/richfelker/musl-cross-make.git
WORKDIR /home/musl-cross-make
COPY ./scripts/musl-cross-make/config.mak .
# Point musl-cross-make at the musl sources shipped in /chos/user instead of
# downloading them, then build and install the cross toolchain.
RUN sed -i 's|MUSL_SRCDIR = $(REL_TOP)/musl-$(MUSL_VER)|MUSL_SRCDIR = /chos/user/musl-1.1.24|' Makefile && \
    export C_INCLUDE_PATH="/chos/user/sys-include" && \
    make -j12 && \
    make install -j12
# install libevent (for memcached)
# NOTE: fixed `-prefix=` to the documented double-dash form `--prefix=`.
WORKDIR /home
RUN git clone https://ghproxy.com//https://github.com/libevent/libevent.git && \
    cd libevent && \
    ./autogen.sh && \
    ./configure --disable-openssl --disable-debug-mode --disable-samples --prefix=/usr/libevent && \
    make CC=/chos/user/musl-1.1.24/build/bin/musl-gcc -s && \
    make install
# install hiredis (for YCSB-C)
WORKDIR /chos/user/demos/YCSB-C/redis/hiredis
RUN make CC=/chos/user/musl-1.1.24/build/bin/musl-gcc -s && \
    make install

View File

@ -26,7 +26,7 @@ or use
./chbuild build
```
## Run in QEMU
### Run in QEMU
```shell
./qemu.exp # with a clean NVM backend
@ -35,9 +35,21 @@ or use
or
```shell
./build/simulate.sh
./build/simulate.sh # with the old NVM backend
```
### Docker
By default, we provide a pre-built Docker image; when you run the build command, it will be downloaded automatically.
If you want to build this image from scratch, you can use the following command to build from the provided dockerfile.
```shell
docker build -t <image_name> .
```
To use the newly built container, you can modify the Docker image name in the `chbuild` file (specifically, line 218 in the `_docker_run()` function) to the image you have built.
## Artificial Evaluation
Please refer to [artificial_eval.md](./artificial_eval.md)
@ -65,6 +77,10 @@ Please refer to [artificial_eval.md](./artificial_eval.md)
|- config.cmake user applications flags
```
### TreeSLS's Implementation
Please refer to [TreeSLS.md](./docs/TreeSLS.md)
## LICENSE
LICENSE of ported applications are given in subdirs.

View File

@ -6,11 +6,15 @@ We thank the artifact evaluators who have volunteered to do one of the toughest
Hardware
- Intel® Optane™ Persistent Memory (or you can use Qemu mode for simulation)
- CPU Core >= 20
Software
- docker: we build the OS within a given docker
- qemu-system-x86: use qemu mode to boot the OS
- ipmitool: for interacting with the real machine (with kernel loaded)
- expect & python3: for scripts
- expect: for interacting with the real machine
- python3: for parsing and drawing
- requirements: matplotlib, pandas, numpy, seaborn
## Building TreeSLS OS
@ -208,3 +212,25 @@ After all, run `./fig12.sh`.
2. run Rocksdb test provided by Aurora (https://github.com/rcslab/aurora-bench/tree/master), scripts are given in `6-aurora-rocksdb/test_rockdb.sh`
3. copy logs of Aurora (by default, in `/aurora-data/`) to `artificial_evaluation/logs/<mode>/rocksdb/`
4. run `./fig14.sh`
## Common Q&A
Q. Changing the CPU number
A. Currently we hard-code the CPU number. To change it, you can:
- Open the file `kernel/arch/x86_64/boot/CMakeLists.txt`. Change line 36 `-smp XXX` with a new value.
- Open the file `/kernel/include/arch/x86_64/plat/intel/machine.h`. Change line 4 `#define PLAT_CPU_NUM XXX` to half the value used above. For example, if using `-smp 20`, change it to `#define PLAT_CPU_NUM 10`. (we use half of the cores to use a single NUMA on the server).
Q. QEMU bug:
```
[INFO] General Protection Fault
[INFO] Faulting Address: 0x0
[INFO] Current thread 0
[INFO] Trap from 0xffffffffc011338b EC 0 Trap No. 13
[INFO] DS 0x0, CS 0x10, RSP 0xffffffffc0109030, SS 0x18
[INFO] rax: 0x706b0, rdx: 0x80010031, rdi: 0xffffffffca14aa80
[INFO] rcx: 0x65
```
A. We have also encountered this bug on Ubuntu 22.04. The issue stems from QEMU having problems with emulating the CPU's PCID feature support on this particular version, thus leading to a General Protection Fault bug.
You can try running it on a machine with a different operating system installed. We have successfully booted it on some machines running Debian and Fedora.

View File

@ -211,8 +211,9 @@ _docker_run() {
$@
else
test -t 1 && use_tty="-t"
docker run -i $use_tty --rm \
-u $(id -u ${USER}):$(id -g ${USER}) \
# docker run -i $use_tty --rm \
# -u $(id -u ${USER}):$(id -g ${USER}) \
docker run -it \
-v $(pwd):/chos -w /chos \
promisivia/treesls_chcore_builder:v2.2 \
$self $@

634
chpm
View File

@ -1,634 +0,0 @@
#!/usr/bin/env python3
#
# 预设的环境变量:
#
# CHPM_TARGET_ARCH 目标编译架构
# CHPM_TARGET_PREFIX 目标安装位置
#
# CHPM_RECIPE_FILE recipe 文件路径 (ports/<package>/recipe.json)
# CHPM_RECIPE_DIR recipe 文件目录路径 (ports/<package>)
#
# CHPM_PKG_NAME 正在安装的包名
# CHPM_PKG_VERSION 正在安装的包版本
#
# CHPM_BUILD_DIR 临时构建目录 (.chpm/build/<package>)
#
import os
import sys
import json
import shutil
import subprocess
import zipfile
from os import path
from pprint import pprint
from functools import partial
from dataclasses import dataclass, field
from argparse import ArgumentParser, Namespace
from typing import List, Set, Dict, Optional, Any, Callable
print_dbg = partial(print, file=sys.stdout)
print_info = partial(print, file=sys.stdout)
print_err = partial(print, file=sys.stderr)
CHCORE_DIR = os.getcwd()
CHPM_DIR = path.join(CHCORE_DIR, ".chpm")
PORTS_DIR = path.join(CHPM_DIR, "ports")
PORTS_REPO = "git@ipads.se.sjtu.edu.cn:ipads-os/chcore-ports.git"
BUILD_DIR = path.join(CHPM_DIR, "build")
DOWNLOADS_DIR = path.join(CHPM_DIR, "downloads")
DEFAULT_INSTALL_DIR = path.join(CHPM_DIR, "install")
PKGINFO_DIR_NAME = "chpm-pkginfo"
@dataclass
class File:
    # One downloadable artifact referenced by a recipe (source or prebuilt).
    # All three fields may contain shell-style $VARS; they are expanded by
    # eval_env_vars() in try_install_recipe() before downloading.
    url: str
    name: str
    extract_dir: str
    # sha256: Optional[str] = None # TODO
@dataclass
class RecipeHead:
    # Lightweight header of a recipe.json (name/version/description only),
    # used by the `search` subcommand without parsing the full recipe.
    name: str
    version: str
    description: str
def read_recipe_head(package_name: str) -> Optional[RecipeHead]:
    """Load only the name/version/description header of a package recipe.

    Returns None when `ports/<package>/recipe.json` does not exist.
    """
    recipe_path = path.join(PORTS_DIR, package_name, "recipe.json")
    if not path.isfile(recipe_path):
        return None
    with open(recipe_path, "r", encoding="utf-8") as fp:
        data = json.load(fp)
    return RecipeHead(
        name=data["name"],
        version=data["version"],
        description=data["description"],
    )
@dataclass
class Recipe:
    # Fully-parsed recipe.json for one package, resolved for one target arch.
    file: str         # absolute path to the recipe.json
    target_arch: str  # architecture this recipe was resolved for
    name: str
    version: str
    description: str
    supported_archs: Set[str] = field(default_factory=set)  # empty set = all archs
    dependencies: List["Recipe"] = field(default_factory=list)
    source_files: List[File] = field(default_factory=list)
    prebuilt_files: List[File] = field(default_factory=list)
    docker_builder: Optional[str] = None  # docker image to run build/install in, if any
    working_dir: str = "."  # directory build/install commands run from
    build: List[str] = field(default_factory=list)
    install: List[str] = field(default_factory=list)
    install_prebuilt: List[str] = field(default_factory=list)
def read_recipe(package_name: str, arch: str) -> Recipe:
    """Parse `ports/<package>/recipe.json` into a Recipe for `arch`.

    Dependencies are read recursively into `recipe.dependencies`.
    Exits the process with status 1 if the package has no recipe file.
    """
    recipe_file = path.join(PORTS_DIR, package_name, "recipe.json")
    if not path.isfile(recipe_file):
        print_err(f"Package `{package_name}` not found")
        exit(1)
    with open(recipe_file, "r", encoding="utf-8") as f:
        recipe_json = json.load(f)
    recipe = Recipe(
        file=recipe_file,
        target_arch=arch,
        name=recipe_json["name"],
        version=recipe_json["version"],
        description=recipe_json["description"],
    )
    if "supported_archs" in recipe_json:
        recipe.supported_archs = set(recipe_json["supported_archs"])

    def get_arch_aware_list(
        recipe_json: Dict[str, Any], key: str, arch: str
    ) -> List[Any]:
        # A recipe field may be a plain list (applies to every arch) or a
        # dict keyed by arch name; missing or invalid values yield [].
        val = recipe_json.get(key)
        if not val or not isinstance(val, (list, dict)):
            return []
        if isinstance(val, dict):
            return val.get(arch, [])
        return val

    dependencies = get_arch_aware_list(recipe_json, "dependencies", arch)
    for dep in dependencies:
        recipe.dependencies.append(read_recipe(dep, arch))

    def json_to_file(file_json: Dict[str, Any]) -> File:
        # Convert one JSON file entry into a File record.
        return File(
            url=file_json["url"],
            name=file_json["name"],
            extract_dir=file_json["extract_dir"],
        )

    source_files = get_arch_aware_list(recipe_json, "source_files", arch)
    for file in source_files:
        recipe.source_files.append(json_to_file(file))
    prebuilt_files = get_arch_aware_list(recipe_json, "prebuilt_files", arch)
    for file in prebuilt_files:
        recipe.prebuilt_files.append(json_to_file(file))
    recipe.docker_builder = recipe_json.get("docker_builder")
    # Default working dir is the build dir; "$CHPM_BUILD_DIR" is expanded
    # later by eval_env_vars() in try_install_recipe().
    recipe.working_dir = recipe_json.get("working_dir") or "$CHPM_BUILD_DIR"
    recipe.build = get_arch_aware_list(recipe_json, "build", arch)
    recipe.install = get_arch_aware_list(recipe_json, "install", arch)
    recipe.install_prebuilt = get_arch_aware_list(recipe_json, "install_prebuilt", arch)
    return recipe
def check_recipe_installable(recipe: Recipe):
    """Exit with an error if the recipe or any of its (transitive)
    dependencies does not support the target architecture.

    A package with an empty `supported_archs` set supports every arch.
    """
    unsupported_packages = set()

    def _intersect_supported_archs(r: Recipe, arch: str):
        # Collect names of packages that declare archs but not `arch`.
        if r.supported_archs and arch not in r.supported_archs:
            unsupported_packages.add(r.name)
        for dep in r.dependencies:
            _intersect_supported_archs(dep, arch)

    _intersect_supported_archs(recipe, recipe.target_arch)
    if unsupported_packages:
        pkgs = ", ".join([f"`{x}`" for x in sorted(list(unsupported_packages))])
        be = "is" if len(unsupported_packages) == 1 else "are"
        print_err(
            f"Package {pkgs} {be} not supported to install on `{recipe.target_arch}`"
        )
        exit(1)
@dataclass
class PkgInfo:
    # Install-time record persisted as <prefix>/chpm-pkginfo/<name>.json.
    name: str
    version: str
    description: str
    dependencies: List["str"]  # names of direct dependencies only
    is_dependency: bool        # True if installed as another package's dependency
    # installed_files: List[str] # TODO
def read_pkginfo(recipe: Recipe, prefix: str) -> Optional[PkgInfo]:
    """Return the PkgInfo recorded for `recipe` under `prefix`, or None
    if the package has not been installed there."""
    info_path = path.join(prefix, PKGINFO_DIR_NAME, f"{recipe.name}.json")
    if not path.exists(info_path):
        return None
    with open(info_path, "r", encoding="utf-8") as fp:
        return PkgInfo(**json.load(fp))
def read_all_pkginfos(prefix: str) -> List[PkgInfo]:
    """Collect the PkgInfo record of every package installed under `prefix`."""
    info_dir = path.join(prefix, PKGINFO_DIR_NAME)
    if not path.exists(info_dir):
        return []
    result: List[PkgInfo] = []
    for entry in os.listdir(info_dir):
        with open(path.join(info_dir, entry), "r", encoding="utf-8") as fp:
            result.append(PkgInfo(**json.load(fp)))
    return result
def write_pkginfo(recipe: Recipe, is_dependency: bool, prefix: str):
    """Record a successful install as `<prefix>/chpm-pkginfo/<name>.json`."""
    pkginfo_dir = path.join(prefix, PKGINFO_DIR_NAME)
    pkginfo_file = path.join(pkginfo_dir, f"{recipe.name}.json")
    os.makedirs(pkginfo_dir, exist_ok=True)
    # Only direct dependency names are persisted, not the whole tree.
    deps = [d.name for d in recipe.dependencies]
    pkginfo = PkgInfo(
        name=recipe.name,
        version=recipe.version,
        description=recipe.description,
        dependencies=deps,
        is_dependency=is_dependency,
    )
    with open(pkginfo_file, "w", encoding="utf-8") as f:
        json.dump(pkginfo.__dict__, f, indent=4)
def download_file(file: File, downloads_dir: str) -> Optional[str]:
    """Fetch `file.url` into `downloads_dir` via curl and return the local
    path.  An already-downloaded copy is reused; on failure any partial
    download is removed and None is returned."""
    target = path.join(downloads_dir, file.name)
    print_dbg("local_path", target)
    if not path.exists(target):
        rc = subprocess.call(["curl", "-L", "-o", target, "--fail", file.url])
        if rc != 0:
            if path.isfile(target):
                os.remove(target)
            return None
    else:
        print_dbg(f"File `{target}` already exists")
    return target
def extract_file(file: File, local_file: str, build_dir: str) -> Optional[str]:
    """Unpack `local_file` into `<build_dir>/<file.extract_dir>`.

    Returns the extraction directory on success, or None if unpacking
    fails.  An already-existing target directory is reused as-is.
    """
    extract_dir = path.join(build_dir, file.extract_dir)
    print_dbg("extract_dir:", extract_dir)
    if path.exists(extract_dir):
        print_dbg(f"Directory `{extract_dir}` already exists")
        return extract_dir
    # Unpack into a temporary sibling directory first, so a failed or
    # partial extraction never leaves a half-populated extract_dir behind.
    tmp_extract_dir = path.join(build_dir, file.extract_dir + "_tmp")
    print_dbg("tmp_extract_dir:", tmp_extract_dir)
    os.mkdir(tmp_extract_dir)

    # zipfile.ZipFile.extractall() drops Unix permission bits; restore them
    # from each entry's external attributes (stored in the upper 16 bits).
    class ZipFileWithPermissions(zipfile.ZipFile):
        def _extract_member(self, member, targetpath, pwd):
            if not isinstance(member, zipfile.ZipInfo):
                member = self.getinfo(member)
            super_extract_member = getattr(super(), "_extract_member")
            targetpath = super_extract_member(member, targetpath, pwd)
            attr = member.external_attr >> 16
            if attr != 0:
                os.chmod(targetpath, attr)
            return targetpath

    def unpack_zip(filename: str, extract_dir: str):
        with ZipFileWithPermissions(filename) as zip:
            zip.extractall(extract_dir)

    # Replace shutil's default zip handler with the permission-preserving one
    # (process-wide side effect; subsequent unpack_archive calls use it too).
    shutil.unregister_unpack_format("zip")
    shutil.register_unpack_format("zip", [".zip"], unpack_zip)
    try:
        shutil.unpack_archive(local_file, tmp_extract_dir)
    except Exception:
        shutil.rmtree(tmp_extract_dir)
        return None
    # strip the only subdirectory if any
    subdirs = os.listdir(tmp_extract_dir)
    if len(subdirs) == 1 and subdirs[0] == file.extract_dir:
        shutil.move(path.join(tmp_extract_dir, subdirs[0]), extract_dir)
        shutil.rmtree(tmp_extract_dir)
    else:
        shutil.move(tmp_extract_dir, extract_dir)
    return extract_dir
def download_and_extract_files(
    files: List[File], downloads_dir: str, build_dir: str
) -> bool:
    """Download every file and unpack it under `build_dir`.

    Stops at the first failure and returns False; True when all succeed.
    """
    for f in files:
        print_info(f"Downloading {f.name} from {f.url}...")
        local = download_file(f, downloads_dir)
        if local is None:
            print_err("Failed to download", f.name, "from", f.url)
            return False
        print_info(f"Extracting {f.name} to {f.extract_dir}...")
        if not extract_file(f, local, build_dir):
            print_err(
                "Failed to extract",
                f.name,
                "to",
                f.extract_dir,
            )
            return False
    return True
def eval_env_vars(s: str) -> str:
    """Expand environment variables in `s` by echoing it through bash.

    NOTE(review): `s` is interpolated into a double-quoted bash string, so
    embedded quotes/backticks are shell-interpreted — recipe fields are
    assumed trusted.
    """
    out = subprocess.check_output(["bash", "-c", f'echo -n "{s}"'])
    return out.decode()
def try_install_recipe_from_prebuilt(
    recipe: Recipe,
    downloads_dir: str,
    build_dir: str,
    cmd_prefix: List[str],
    working_dir: str,
):
    """Install a package from its prebuilt archives.

    Each `install_prebuilt` command runs through `bash -c` inside
    `working_dir`, optionally behind a `docker run` prefix.  Returns
    False on the first failure, True when everything succeeds.
    """
    if not download_and_extract_files(recipe.prebuilt_files, downloads_dir, build_dir):
        return False
    print_info("Installing from prebuilt files...")
    for step in recipe.install_prebuilt:
        full_cmd = cmd_prefix + ["bash", "-c", f"cd {working_dir} && " + step]
        print_dbg("cmd:", full_cmd)
        if subprocess.call(full_cmd) != 0:
            print_err("Failed to run install prebuilt command")
            return False
    return True
def try_install_recipe_from_source(
    recipe: Recipe,
    downloads_dir: str,
    build_dir: str,
    cmd_prefix: List[str],
    working_dir: str,
) -> bool:
    """Build and install a package from its source archives.

    Runs the recipe's `build` commands, then its `install` commands; each
    is executed via `bash -c` inside `working_dir`, optionally behind a
    `docker run` prefix.  Returns False on the first failing step.
    """
    if not download_and_extract_files(recipe.source_files, downloads_dir, build_dir):
        return False
    print_info("Installing from source files...")
    print_info("Running build commands...")
    for build_cmd in recipe.build:
        cmd = cmd_prefix + ["bash", "-c", f"cd {working_dir} && " + build_cmd]
        print_dbg("cmd:", cmd)
        ret = subprocess.call(cmd)
        if ret != 0:
            print_err("Failed to run build command")
            return False
    print_info("Running install commands...")
    for install_cmd in recipe.install:
        cmd = cmd_prefix + ["bash", "-c", f"cd {working_dir} && " + install_cmd]
        print_dbg("cmd:", cmd)
        ret = subprocess.call(cmd)
        if ret != 0:
            print_err("Failed to run install command")
            return False
    return True
def try_install_recipe(recipe: Recipe, downloads_dir: str, build_dir: str) -> bool:
    """Prepare the environment for one recipe and run its install flow.

    Exports the CHPM_* variables recipe commands may reference, expands
    $VARS in file fields and the working dir, and builds an optional
    `docker run` command prefix.  Prefers prebuilt installation when the
    recipe provides `install_prebuilt` commands; otherwise builds from
    source.  Returns the success flag of the chosen flow.
    """
    os.environ["CHPM_RECIPE_FILE"] = recipe.file
    os.environ["CHPM_RECIPE_DIR"] = path.dirname(recipe.file)
    os.environ["CHPM_PKG_NAME"] = recipe.name
    os.environ["CHPM_PKG_VERSION"] = recipe.version
    os.environ["CHPM_BUILD_DIR"] = build_dir
    # Expand $VARS now that the CHPM_* variables are in the environment.
    for file in recipe.source_files + recipe.prebuilt_files:
        file.url = eval_env_vars(file.url)
        file.name = eval_env_vars(file.name)
        file.extract_dir = eval_env_vars(file.extract_dir)
    recipe.working_dir = eval_env_vars(recipe.working_dir)
    cmd_prefix = []
    if recipe.docker_builder:
        # For packages that should be built in docker container,
        # we prepare a prefix for every build command and install
        # command.
        cmd_prefix.extend(
            [
                "docker",
                "run",
                "-ti",
                "--rm",
                "-u",
                f"{os.getuid()}:{os.getgid()}",
                "-v",
                f"{CHCORE_DIR}:{CHCORE_DIR}",
            ]
        )
        # Forward every CHPM_* variable into the container.
        for (env, val) in os.environ.items():
            if env.startswith("CHPM_"):
                cmd_prefix.extend(["-e", f"{env}={val}"])
        cmd_prefix.append(recipe.docker_builder)
    working_dir = recipe.working_dir
    print_dbg("working_dir:", recipe.working_dir)
    if recipe.install_prebuilt:
        return try_install_recipe_from_prebuilt(
            recipe, downloads_dir, build_dir, cmd_prefix, working_dir
        )
    return try_install_recipe_from_source(
        recipe, downloads_dir, build_dir, cmd_prefix, working_dir
    )
def install_recipe(
    recipe: Recipe, prefix: str, is_dependency: bool = False, dev_mode: bool = False
):
    """Install `recipe` (and, recursively, its dependencies) into `prefix`.

    Skips packages already recorded as installed.  In dev_mode the
    temporary build directory is preserved between runs instead of being
    recreated/deleted.  Exits the process with status 1 on failure.
    """
    if read_pkginfo(recipe, prefix):
        print_info(f"`{recipe.name}` is already installed")
        return
    for dep in recipe.dependencies:
        install_recipe(dep, prefix, is_dependency=True)
    print_info(f"Installing `{recipe.name}`...")
    downloads_dir = path.join(DOWNLOADS_DIR, recipe.target_arch, recipe.name)
    os.makedirs(downloads_dir, exist_ok=True)
    print_dbg("downloads_dir:", downloads_dir)
    build_dir = path.join(BUILD_DIR, recipe.name)
    # Start from a clean build dir unless dev_mode asked to keep it.
    if not dev_mode and path.isdir(build_dir):
        shutil.rmtree(build_dir)
    os.makedirs(build_dir, exist_ok=dev_mode)
    print_dbg("build_dir:", build_dir)
    ok = try_install_recipe(recipe, downloads_dir, build_dir)
    if not dev_mode:
        shutil.rmtree(build_dir)
    if ok:
        write_pkginfo(recipe, is_dependency, prefix)
        print_info(f"Successfully installed `{recipe.name}`")
    else:
        print_err(f"Failed to install `{recipe.name}`")
        exit(1)
subcommands = {}
def subcommand(name: str):
    """Decorator that registers the wrapped function as the handler for
    the CLI subcommand `name`."""
    def register(handler: Callable) -> Callable:
        subcommands[name] = handler
        return handler
    return register
@subcommand("init")
def subcommand_init(args: Namespace):
    """Create the `.chpm` working directory and clone the `ports` repo.

    With --force an existing `.chpm` directory is removed first;
    otherwise the command refuses to overwrite it and exits with 1.
    """
    if path.exists(CHPM_DIR):
        if args.force:
            shutil.rmtree(CHPM_DIR)
        else:
            print_err(
                "`.chpm` directory already exists, "
                "add `--force` option to force re-initialization"
            )
            exit(1)
    print_info("Initializing `.chpm` directory...")
    os.makedirs(DOWNLOADS_DIR, exist_ok=True)
    os.makedirs(BUILD_DIR, exist_ok=True)
    print_info(f"Cloning `ports` repo ({PORTS_REPO})...")
    ret = subprocess.call(["git", "clone", "--depth=1", PORTS_REPO, PORTS_DIR])
    if ret != 0:
        print_err(f"Failed to clone `ports` repo")
        exit(1)
    print_info(f"Successfully initialized")
@subcommand("update")
def subcommand_update(args: Namespace):
    """Pull the latest `ports` recipes from origin/master; exit 1 on failure."""
    print_info("Updating `ports` repo...")
    rc = subprocess.call(["git", "-C", PORTS_DIR, "pull", "origin", "master"])
    if rc != 0:
        print_err(f"Failed to update `ports` repo")
        exit(1)
    print_info(f"Successfully updated")
@subcommand("install")
def subcommand_install(args: Namespace):
    """Install the requested packages (and their dependencies).

    Exports CHPM_TARGET_ARCH / CHPM_TARGET_PREFIX for recipe commands,
    then reads, validates, and installs each requested recipe.
    """
    # subcommand_update(args)
    print()  # for a new line
    print_info(f"Installing packages into `{args.prefix}`...\n")
    arch = args.arch
    prefix = path.abspath(args.prefix)
    os.environ["CHPM_TARGET_ARCH"] = arch
    os.environ["CHPM_TARGET_PREFIX"] = prefix
    for package_name in args.packages:
        recipe = read_recipe(package_name, arch)
        print_dbg("recipe:", recipe)
        check_recipe_installable(recipe)
        # Fix: install into the absolute `prefix` that CHPM_TARGET_PREFIX
        # advertises to recipe commands.  Previously the raw (possibly
        # relative) args.prefix was passed, so pkginfo records could land
        # in a different directory than the one recipes installed into.
        install_recipe(recipe, prefix, dev_mode=args.dev_mode)
@subcommand("list")
def subcommand_list(args: Namespace):
    """Print `name=version` for every package installed under the prefix."""
    print_info(f"Listing packages installed into `{args.prefix}`...\n")
    infos = read_all_pkginfos(path.abspath(args.prefix))
    if not infos:
        print_info("No packages installed\n")
        return
    print_info("Installed packages:\n")
    for info in infos:
        print_info(f"{info.name}={info.version}")
@subcommand("config")
def subcommand_config(args: Namespace):
    # Placeholder: `config` is registered in the CLI but not implemented yet.
    pass
@subcommand("search")
def subcommand_search(args: Namespace):
    """List packages whose port directory name contains `keyword`.

    Updates the `ports` repo first, then prints name, version, and
    description for every match.
    """
    subcommand_update(args)
    print()  # for a new line
    keyword = args.keyword
    print_dbg("search keyword:", keyword)
    ports = os.listdir(PORTS_DIR)
    result = []
    for port in ports:
        print_dbg("port:", port)
        if keyword in port:
            result.append(port)
    print_dbg("search result:", result)
    if result:
        print_info("Search result:\n")
        for pkg in result:
            # Every matching port dir is expected to contain a recipe.json.
            recipe_head = read_recipe_head(pkg)
            assert recipe_head
            print_info(
                f"{recipe_head.name} ({recipe_head.version}):\n\t{recipe_head.description}"
            )
def create_argparser() -> ArgumentParser:
    """Build the top-level CLI parser with all subcommand parsers attached."""
    parser = ArgumentParser(description="The ChCore Package Manager.")
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        default=False,
        help="Verbose mode",
    )
    subparsers = parser.add_subparsers(dest="subcommand")
    # init
    parser_init = subparsers.add_parser("init", help="initialize `.chpm` directory")
    parser_init.add_argument(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        default=False,
        help="remove existing `.chpm` dir",
    )
    # update
    subparsers.add_parser("update", help="update `ports` repo")
    # install
    parser_install = subparsers.add_parser("install", help="install package")
    parser_install.add_argument(
        "-p",
        "--prefix",
        dest="prefix",
        default=DEFAULT_INSTALL_DIR,
        help="prefix of install destination",
    )
    parser_install.add_argument(
        "-a", "--arch", dest="arch", required=True, help="target architecture"
    )
    parser_install.add_argument(
        "-d",
        "--dev-mode",
        dest="dev_mode",
        action="store_true",
        default=False,
        help="enable development mode (temporary build directory won't be deleted)",
    )
    parser_install.add_argument("packages", nargs="+", help="packages to install")
    # list
    parser_list = subparsers.add_parser("list", help="list installed package")
    parser_list.add_argument(
        "-p",
        "--prefix",
        dest="prefix",
        default=DEFAULT_INSTALL_DIR,
        help="prefix of install destination",
    )
    # config / search
    parser_config = subparsers.add_parser("config", help="change config")
    parser_search = subparsers.add_parser("search", help="search for packages")
    parser_search.add_argument("keyword", help="search keyword")
    # TODO: other subcommand
    return parser
def main():
    """Entry point: parse arguments and dispatch to the chosen subcommand."""
    parser = create_argparser()
    args = parser.parse_args()
    if not args.subcommand:
        parser.print_help()
        exit(1)
    # Silence debug output unless --verbose was given.
    global print_dbg
    if not args.verbose:
        print_dbg = lambda *a, **kw: None
    print_dbg("Command args:", args)
    # Every subcommand except `init` requires an initialized `.chpm` dir.
    if not path.exists(CHPM_DIR) and args.subcommand != "init":
        print_err("Please run `./chpm init` first")
        exit(1)
    subcommands[args.subcommand](args)


if __name__ == "__main__":
    main()

29
docs/TreeSLS.md Normal file
View File

@ -0,0 +1,29 @@
# Implementation of TreeSLS
We implemented TreeSLS based on ChCore, which is an educational multicore microkernel that supports POSIX APIs through musl-libc.
TreeSLS's modification includes:
- checkpoint/restore module (`kernel/ckpt`)
- memory allocator module (`kernel/mm`)
- ipi module (`kernel/ipi`)
- several new syscalls (`kernel/syscall`)
- user applications (listed in `user/sample-apps/apps/treesls`)
## Checkpoint/Restore
TreeSLS adds a checkpoint/restore module (`kernel/ckpt`).
Whole-system checkpoint/restore can be taken through `sys_whole_ckpt` (`kernel/ckpt/ckpt.c`) and `sys_whole_restore` (`kernel/ckpt/restore.c`).
Hybrid page checkpointing is done through `process_sub_active_list()`, which is called in parallel to the main process of checkpointing.
## Memory Allocator
TreeSLS modifies the memory allocator (`kernel/mm`) by adding a lightweight journal when malloc and free pages from the buddy system.
The journal is taken through `prepare_latest_log()` in every malloc/free function.
## Other Modules (IPI, syscall, etc.)
TreeSLS also modified some other modules (IPI, syscall, etc.) to enable checkpointing.
1. IPI: add `sys_ipi_stop_all` and `sys_ipi_start_all`.
2. syscall: export `sys_whole_ckpt`, `sys_whole_restore`, and several supporting syscalls.

View File

@ -1,12 +1,12 @@
# SLS Basic Configurations
set(SLS_RESTORE ON)
set(SLS_RESTORE OFF)
set(SLS_EXT_SYNC OFF)
set(SLS_HYBRID_MEM ON)
# SLS Report Details
set(SLS_REPORT_CKPT ON)
set(SLS_REPORT_RESTORE ON)
set(SLS_REPORT_HYBRID ON)
set(SLS_REPORT_CKPT OFF)
set(SLS_REPORT_RESTORE OFF)
set(SLS_REPORT_HYBRID OFF)
# SLS special tests: for tests of only-checkpoint, +pf, +memcpy
set(SLS_SPECIAL_OMIT_PF OFF)

View File

@ -0,0 +1,89 @@
#
# config.mak.dist - sample musl-cross-make configuration
#
# Copy to config.mak and edit as desired.
#
# There is no default TARGET; you must select one here or on the make
# command line. Some examples:
# TARGET = i486-linux-musl
TARGET = x86_64-linux-musl
# TARGET = arm-linux-musleabi
# TARGET = arm-linux-musleabihf
# TARGET = sh2eb-linux-muslfdpic
# ...
# By default, cross compilers are installed to ./output under the top-level
# musl-cross-make directory and can later be moved wherever you want them.
# To install directly to a specific location, set it here. Multiple targets
# can safely be installed in the same location. Some examples:
OUTPUT = /home/musl-cross-make/install
# OUTPUT = /opt/cross
# OUTPUT = /usr/local
# By default, latest supported release versions of musl and the toolchain
# components are used. You can override those here, but the version selected
# must be supported (under hashes/ and patches/) to work. For musl, you
# can use "git-refname" (e.g. git-master) instead of a release. Setting a
# blank version for gmp, mpc, mpfr and isl will suppress download and
# in-tree build of these libraries and instead depend on pre-installed
# libraries when available (isl is optional and not set by default).
# Setting a blank version for linux will suppress installation of kernel
# headers, which are not needed unless compiling programs that use them.
# BINUTILS_VER = 2.25.1
# GCC_VER = 5.2.0
# MUSL_VER = git-master
# GMP_VER =
# MPC_VER =
# MPFR_VER =
# ISL_VER =
# LINUX_VER =
# By default source archives are downloaded with wget. curl is also an option.
# DL_CMD = wget -c -O
# DL_CMD = curl -C - -L -o
# Check sha-1 hashes of downloaded source archives. On gnu systems this is
# usually done with sha1sum.
# SHA1_CMD = sha1sum -c
# SHA1_CMD = sha1 -c
# SHA1_CMD = shasum -a 1 -c
# Something like the following can be used to produce a static-linked
# toolchain that's deployable to any system with matching arch, using
# an existing musl-targeted cross compiler. This only works if the
# system you build on can natively (or via binfmt_misc and qemu) run
# binaries produced by the existing toolchain (in this example, i486).
# COMMON_CONFIG += CC="i486-linux-musl-gcc -static --static" CXX="i486-linux-musl-g++ -static --static"
# Recommended options for smaller build for deploying binaries:
# COMMON_CONFIG += CFLAGS="-g0 -Os" CXXFLAGS="-g0 -Os" LDFLAGS="-s"
# Options you can add for faster/simpler build at the expense of features:
# COMMON_CONFIG += --disable-nls
# GCC_CONFIG += --disable-libquadmath --disable-decimal-float
# GCC_CONFIG += --disable-libitm
# GCC_CONFIG += --disable-fixed-point
# GCC_CONFIG += --disable-lto
# GCC_CONFIG += --enable-default-pie
# By default C and C++ are the only languages enabled, and these are
# the only ones tested and known to be supported. You can uncomment the
# following and add other languages if you want to try getting them to
# work too.
# GCC_CONFIG += --enable-languages=c,c++
# You can keep the local build path out of your toolchain binaries and
# target libraries with the following, but then gdb needs to be told
# where to look for source files.
# COMMON_CONFIG += --with-debug-prefix-map=$(CURDIR)=

View File

@ -0,0 +1,323 @@
#define _GNU_SOURCE
#include <malloc.h>
#include <errno.h>
#include <endian.h>
#include <stdbool.h>
#include "elf.h"
#define PAGE_SIZE 0x1000
/*
* TODO(MK): Simplify the duplications in the file.
*/
/* Check the first four identification bytes: 0x7F 'E' 'L' 'F'. */
static bool is_elf_magic(struct elf_indent *indent)
{
	if (indent->ei_magic[0] != 0x7F)
		return false;
	if (indent->ei_magic[1] != 'E')
		return false;
	if (indent->ei_magic[2] != 'L')
		return false;
	return indent->ei_magic[3] == 'F';
}
#define ELF_ENDIAN_LE(indent) ((indent).ei_data == 1)
#define ELF_ENDIAN_BE(indent) ((indent).ei_data == 2)
#define ELF_BITS_32(indent) ((indent).ei_class == 1)
#define ELF_BITS_64(indent) ((indent).ei_class == 2)
/**
* Parse an ELF file header. We use the 64-bit structure `struct elf_header` as
* the output structure.
*
* On error, the negative error code is returned.
* On success, 0 is returned, and the header is written in the given parameter.
*/
static int parse_elf_header(const char *code, struct elf_header *header)
{
	/* View the same raw bytes as both 64-bit and 32-bit headers; which
	 * interpretation is valid is decided by ei_class below. */
	struct elf_header *header_64 = (struct elf_header *)code;
	struct elf_header_32 *header_32 = (struct elf_header_32 *)code;
	if (!is_elf_magic(&header_64->e_indent)) {
		return -EINVAL;
	}
	header->e_indent = *(struct elf_indent *)code;
	if (ELF_ENDIAN_LE(header->e_indent)) {
		/*
		 * For the first few bytes, both 32-bit and 64-bit ELF headers
		 * have the same field width. So, we simply use header_64 at
		 * first.
		 */
		header->e_type = le16toh(header_64->e_type);
		header->e_machine = le16toh(header_64->e_machine);
		header->e_version = le32toh(header_32->e_version);
		if (ELF_BITS_32(header->e_indent)) {
			/* 32-bit little-endian: entry/phoff/shoff are 32-bit. */
			header->e_entry = le32toh(header_32->e_entry);
			header->e_phoff = le32toh(header_32->e_phoff);
			header->e_shoff = le32toh(header_32->e_shoff);
			header->e_flags = le32toh(header_32->e_flags);
			header->e_ehsize = le16toh(header_32->e_ehsize);
			header->e_phentsize = le16toh(header_32->e_phentsize);
			header->e_phnum = le16toh(header_32->e_phnum);
			header->e_shentsize = le16toh(header_32->e_shentsize);
			header->e_shnum = le16toh(header_32->e_shnum);
			header->e_shstrndx = le16toh(header_32->e_shstrndx);
		} else if (ELF_BITS_64(header->e_indent)) {
			/* 64-bit little-endian. */
			header->e_entry = le64toh(header_64->e_entry);
			header->e_phoff = le64toh(header_64->e_phoff);
			header->e_shoff = le64toh(header_64->e_shoff);
			header->e_flags = le32toh(header_64->e_flags);
			header->e_ehsize = le16toh(header_64->e_ehsize);
			header->e_phentsize = le16toh(header_64->e_phentsize);
			header->e_phnum = le16toh(header_64->e_phnum);
			header->e_shentsize = le16toh(header_64->e_shentsize);
			header->e_shnum = le16toh(header_64->e_shnum);
			header->e_shstrndx = le16toh(header_64->e_shstrndx);
		} else {
			/* Invalid ei_class. */
			return -EINVAL;
		}
	} else if (ELF_ENDIAN_BE(header->e_indent)) {
		/*
		 * We use header_64 for the same reason as above.
		 */
		header->e_type = be16toh(header_64->e_type);
		header->e_machine = be16toh(header_64->e_machine);
		header->e_version = be32toh(header_32->e_version);
		if (ELF_BITS_32(header->e_indent)) {
			/* 32-bit big-endian. */
			header->e_entry = be32toh(header_32->e_entry);
			header->e_phoff = be32toh(header_32->e_phoff);
			header->e_shoff = be32toh(header_32->e_shoff);
			header->e_flags = be32toh(header_32->e_flags);
			header->e_ehsize = be16toh(header_32->e_ehsize);
			header->e_phentsize = be16toh(header_32->e_phentsize);
			header->e_phnum = be16toh(header_32->e_phnum);
			header->e_shentsize = be16toh(header_32->e_shentsize);
			header->e_shnum = be16toh(header_32->e_shnum);
			header->e_shstrndx = be16toh(header_32->e_shstrndx);
		} else if (ELF_BITS_64(header->e_indent)) {
			/* 64-bit big-endian. */
			header->e_entry = be64toh(header_64->e_entry);
			header->e_phoff = be64toh(header_64->e_phoff);
			header->e_shoff = be64toh(header_64->e_shoff);
			header->e_flags = be32toh(header_64->e_flags);
			header->e_ehsize = be16toh(header_64->e_ehsize);
			header->e_phentsize = be16toh(header_64->e_phentsize);
			header->e_phnum = be16toh(header_64->e_phnum);
			header->e_shentsize = be16toh(header_64->e_shentsize);
			header->e_shnum = be16toh(header_64->e_shnum);
			header->e_shstrndx = be16toh(header_64->e_shstrndx);
		} else {
			/* Invalid ei_class. */
			return -EINVAL;
		}
	} else {
		/* Invalid ei_data (endianness marker). */
		return -EINVAL;
	}
	return 0;
}
/**
 * Decode one on-disk ELF program header at `code` into the unified
 * 64-bit `struct elf_program_header`, honouring the class (32/64-bit)
 * and byte order recorded in the already-parsed file header `elf`.
 *
 * Returns 0 on success; -EINVAL when the class or endianness flags in
 * `elf->e_indent` are unrecognized.
 */
static int parse_elf_program_header(const char *code,
                                    const struct elf_header *elf,
                                    struct elf_program_header *header)
{
        if (ELF_ENDIAN_LE(elf->e_indent)) {
                if (ELF_BITS_32(elf->e_indent)) {
                        struct elf_program_header_32 *ph32 =
                                (struct elf_program_header_32 *)code;

                        header->p_type = le32toh(ph32->p_type);
                        header->p_flags = le32toh(ph32->p_flags);
                        header->p_offset = le32toh(ph32->p_offset);
                        header->p_vaddr = le32toh(ph32->p_vaddr);
                        header->p_paddr = le32toh(ph32->p_paddr);
                        header->p_filesz = le32toh(ph32->p_filesz);
                        header->p_memsz = le32toh(ph32->p_memsz);
                        header->p_align = le32toh(ph32->p_align);
                        return 0;
                }
                if (ELF_BITS_64(elf->e_indent)) {
                        struct elf_program_header *ph64 =
                                (struct elf_program_header *)code;

                        header->p_type = le32toh(ph64->p_type);
                        header->p_flags = le32toh(ph64->p_flags);
                        header->p_offset = le64toh(ph64->p_offset);
                        header->p_vaddr = le64toh(ph64->p_vaddr);
                        header->p_paddr = le64toh(ph64->p_paddr);
                        header->p_filesz = le64toh(ph64->p_filesz);
                        header->p_memsz = le64toh(ph64->p_memsz);
                        header->p_align = le64toh(ph64->p_align);
                        return 0;
                }
                return -EINVAL;
        }
        if (ELF_ENDIAN_BE(elf->e_indent)) {
                if (ELF_BITS_32(elf->e_indent)) {
                        struct elf_program_header_32 *ph32 =
                                (struct elf_program_header_32 *)code;

                        header->p_type = be32toh(ph32->p_type);
                        header->p_flags = be32toh(ph32->p_flags);
                        header->p_offset = be32toh(ph32->p_offset);
                        header->p_vaddr = be32toh(ph32->p_vaddr);
                        header->p_paddr = be32toh(ph32->p_paddr);
                        header->p_filesz = be32toh(ph32->p_filesz);
                        header->p_memsz = be32toh(ph32->p_memsz);
                        header->p_align = be32toh(ph32->p_align);
                        return 0;
                }
                if (ELF_BITS_64(elf->e_indent)) {
                        struct elf_program_header *ph64 =
                                (struct elf_program_header *)code;

                        header->p_type = be32toh(ph64->p_type);
                        header->p_flags = be32toh(ph64->p_flags);
                        header->p_offset = be64toh(ph64->p_offset);
                        header->p_vaddr = be64toh(ph64->p_vaddr);
                        header->p_paddr = be64toh(ph64->p_paddr);
                        header->p_filesz = be64toh(ph64->p_filesz);
                        header->p_memsz = be64toh(ph64->p_memsz);
                        header->p_align = be64toh(ph64->p_align);
                        return 0;
                }
                return -EINVAL;
        }
        return -EINVAL;
}
/**
* Parse an ELF section header. We use the 64-bit structure
* `struct elf_section_header` as the output structure.
*
* On error, the negative error code is returned.
* On success, 0 is returned, and the header is written in the given parameter.
*/
static int parse_elf_section_header(const char *code,
const struct elf_header *elf,
struct elf_section_header *header)
{
struct elf_section_header *header_64;
struct elf_section_header_32 *header_32;
if (ELF_ENDIAN_LE(elf->e_indent)) {
if (ELF_BITS_32(elf->e_indent)) {
header_32 = (struct elf_section_header_32 *)code;
header->sh_name = le32toh(header_32->sh_name);
header->sh_type = le32toh(header_32->sh_type);
header->sh_flags = le32toh(header_32->sh_flags);
header->sh_addr = le32toh(header_32->sh_addr);
header->sh_offset = le32toh(header_32->sh_offset);
header->sh_size = le32toh(header_32->sh_size);
header->sh_link = le32toh(header_32->sh_link);
header->sh_info = le32toh(header_32->sh_info);
header->sh_addralign = le32toh(header_32->sh_addralign);
header->sh_entsize = le32toh(header_32->sh_entsize);
} else if (ELF_BITS_64(elf->e_indent)) {
header_64 = (struct elf_section_header *)code;
header->sh_name = le32toh(header_64->sh_name);
header->sh_type = le32toh(header_64->sh_type);
header->sh_flags = le64toh(header_64->sh_flags);
header->sh_addr = le64toh(header_64->sh_addr);
header->sh_offset = le64toh(header_64->sh_offset);
header->sh_size = le64toh(header_64->sh_size);
header->sh_link = le32toh(header_64->sh_link);
header->sh_info = le32toh(header_64->sh_info);
header->sh_addralign = le64toh(header_64->sh_addralign);
header->sh_entsize = le64toh(header_64->sh_entsize);
} else {
return -EINVAL;
}
} else if (ELF_ENDIAN_BE(elf->e_indent)) {
if (ELF_BITS_32(elf->e_indent)) {
header_32 = (struct elf_section_header_32 *)code;
header->sh_name = be32toh(header_32->sh_name);
header->sh_type = be32toh(header_32->sh_type);
header->sh_flags = be32toh(header_32->sh_flags);
header->sh_addr = be32toh(header_32->sh_addr);
header->sh_offset = be32toh(header_32->sh_offset);
header->sh_size = be32toh(header_32->sh_size);
header->sh_link = be32toh(header_32->sh_link);
header->sh_info = be32toh(header_32->sh_info);
header->sh_addralign = be32toh(header_32->sh_addralign);
header->sh_entsize = be32toh(header_32->sh_entsize);
} else if (ELF_BITS_64(elf->e_indent)) {
header_64 = (struct elf_section_header *)code;
header->sh_name = be32toh(header_64->sh_name);
header->sh_type = be32toh(header_64->sh_type);
header->sh_flags = be64toh(header_64->sh_flags);
header->sh_addr = be64toh(header_64->sh_addr);
header->sh_offset = be64toh(header_64->sh_offset);
header->sh_size = be64toh(header_64->sh_size);
header->sh_link = be32toh(header_64->sh_link);
header->sh_info = be32toh(header_64->sh_info);
header->sh_addralign = be64toh(header_64->sh_addralign);
header->sh_entsize = be64toh(header_64->sh_entsize);
} else {
return -EINVAL;
}
} else {
return -EINVAL;
}
return 0;
}
/**
 * Release an elf_file returned by elf_parse_file(), including its
 * program- and section-header arrays.
 *
 * Safe to call with NULL (no-op), mirroring free() semantics; the old
 * code dereferenced `elf` unconditionally.
 */
void elf_free(struct elf_file *elf)
{
        if (!elf)
                return;
        /* free(NULL) is a no-op, so no per-pointer checks are needed. */
        free(elf->s_headers);
        free(elf->p_headers);
        free(elf);
}
/**
 * Parse the ELF image in memory at `code` into a freshly allocated
 * `struct elf_file` (unified 64-bit header representation).
 *
 * Returns NULL on any failure (bad header, unknown class/endianness,
 * or out of memory).  On success the caller owns the result and must
 * release it with elf_free().
 */
struct elf_file *elf_parse_file(const char *code)
{
        struct elf_file *elf;
        int err;
        int i;

        elf = malloc(sizeof(*elf));
        if (!elf)
                return NULL;
        err = parse_elf_header(code, &elf->header);
        if (err)
                goto out_free_elf;

        /*
         * BUGFIX: size the arrays by the *in-memory* struct size, not by
         * e_phentsize/e_shentsize.  Those fields give the on-disk entry
         * size, which for 32-bit ELFs is smaller than the 64-bit structs
         * the parse_* helpers write into -- the old code overflowed the
         * heap buffers for every 32-bit input.
         */
        err = -ENOMEM;
        elf->p_headers =
                malloc(sizeof(*elf->p_headers) * elf->header.e_phnum);
        if (!elf->p_headers)
                goto out_free_elf;
        elf->s_headers =
                malloc(sizeof(*elf->s_headers) * elf->header.e_shnum);
        if (!elf->s_headers)
                goto out_free_elf_p;

        /*
         * Walk the on-disk tables using the on-disk strides
         * (e_phentsize/e_shentsize), widening each entry.
         */
        for (i = 0; i < elf->header.e_phnum; ++i) {
                err = parse_elf_program_header(code + elf->header.e_phoff +
                                               elf->header.e_phentsize * i,
                                               &elf->header,
                                               &elf->p_headers[i]);
                if (err)
                        goto out_free_all;
        }
        for (i = 0; i < elf->header.e_shnum; ++i) {
                err = parse_elf_section_header(code + elf->header.e_shoff +
                                               elf->header.e_shentsize * i,
                                               &elf->header,
                                               &elf->s_headers[i]);
                if (err)
                        goto out_free_all;
        }
        return elf;

out_free_all:
        free(elf->s_headers);
out_free_elf_p:
        free(elf->p_headers);
out_free_elf:
        free(elf);
        return NULL;
}

View File

@ -0,0 +1,156 @@
#pragma once
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* ELF format according to
* https://en.wikipedia.org/wiki/Executable_and_Linkable_Format
*/
/* Number of magic bytes at the start of e_ident (0x7f 'E' 'L' 'F'). */
#define EI_MAG_SIZE 4

/* Program header segment types (p_type). */
#define PT_NULL 0x00000000
#define PT_LOAD 0x00000001
#define PT_DYNAMIC 0x00000002
#define PT_INTERP 0x00000003
#define PT_NOTE 0x00000004
#define PT_SHLIB 0x00000005
#define PT_PHDR 0x00000006
#define PT_LOOS 0x60000000
#define PT_HIOS 0x6fffffff
#define PT_LOPROC 0x70000000
/*
 * PT_HIRPOC is a historical misspelling of PT_HIPROC; it is kept so any
 * existing users keep compiling, but new code should use PT_HIPROC.
 */
#define PT_HIRPOC 0x7fffffff
#define PT_HIPROC 0x7fffffff

/* Program header permission flags (p_flags). */
#define PF_ALL 0x7
#define PF_X 0x1
#define PF_W 0x2
#define PF_R 0x4
/* Fixed-width integer aliases used by the ELF structures below. */
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
typedef int64_t s64;
typedef int32_t s32;
typedef int16_t s16;
typedef int8_t s8;
/*
 * The identification bytes at the start of every ELF file.  This part
 * of the ELF header has the same layout regardless of the file's class
 * (32/64-bit) or endianness, so it is read first and used to decide how
 * to parse the rest.
 * NOTE(review): "indent" is this codebase's spelling of the standard
 * e_ident field; kept as-is since every struct below references it.
 */
struct elf_indent {
        u8 ei_magic[4];   /* 0x7f 'E' 'L' 'F' */
        u8 ei_class;      /* 1 = 32-bit, 2 = 64-bit (per the ELF spec) */
        u8 ei_data;       /* 1 = little-endian, 2 = big-endian */
        u8 ei_version;
        u8 ei_osabi;
        u8 ei_abiversion;
        u8 ei_pad[7];     /* reserved padding */
};
/*
 * ELF header in its 64-bit layout.  This doubles as the unified
 * in-memory representation: parse_elf_header widens 32-bit files (read
 * via `struct elf_header_32`) into this structure, so callers only ever
 * deal with one form.  Check `e_indent` to decide the endianness.
 */
struct elf_header {
        struct elf_indent e_indent; /* endianness-independent id bytes */
        u16 e_type;      /* object file type */
        u16 e_machine;   /* target architecture */
        u32 e_version;
        u64 e_entry;     /* entry point virtual address */
        u64 e_phoff;     /* file offset of the program header table */
        u64 e_shoff;     /* file offset of the section header table */
        u32 e_flags;
        u16 e_ehsize;    /* on-disk size of this header */
        u16 e_phentsize; /* on-disk size of one program header entry */
        u16 e_phnum;     /* number of program header entries */
        u16 e_shentsize; /* on-disk size of one section header entry */
        u16 e_shnum;     /* number of section header entries */
        u16 e_shstrndx;  /* index of the section-name string table */
};
/*
 * 32-bit on-disk variant of the ELF header.  Check `e_indent` first to
 * decide which layout applies; parse_elf_header converts this into the
 * unified 64-bit `struct elf_header` (entry/phoff/shoff are widened).
 */
struct elf_header_32 {
        struct elf_indent e_indent;
        u16 e_type;
        u16 e_machine;
        u32 e_version;
        u32 e_entry;     /* 32-bit in this layout */
        u32 e_phoff;     /* 32-bit in this layout */
        u32 e_shoff;     /* 32-bit in this layout */
        u32 e_flags;
        u16 e_ehsize;
        u16 e_phentsize;
        u16 e_phnum;
        u16 e_shentsize;
        u16 e_shnum;
        u16 e_shstrndx;
};
/*
 * 64-bit program (segment) header; also the unified in-memory form that
 * parse_elf_program_header produces for both 32- and 64-bit files.
 */
struct elf_program_header {
        u32 p_type;   /* segment type (PT_*) */
        u32 p_flags;  /* permission bits (PF_R/PF_W/PF_X) */
        u64 p_offset; /* file offset of the segment data */
        u64 p_vaddr;  /* virtual load address */
        u64 p_paddr;  /* physical address (use is platform-specific) */
        u64 p_filesz; /* bytes stored in the file */
        u64 p_memsz;  /* bytes occupied in memory */
        u64 p_align;  /* required alignment */
};
/*
 * 32-bit on-disk program header.  Note the field order intentionally
 * differs from the 64-bit layout (p_flags follows p_memsz here); that
 * matches the ELF32 specification and must not be "fixed".
 */
struct elf_program_header_32 {
        u32 p_type;
        u32 p_offset;
        u32 p_vaddr;
        u32 p_paddr;
        u32 p_filesz;
        u32 p_memsz;
        u32 p_flags;  /* placed after p_memsz in ELF32 */
        u32 p_align;
};
/*
 * 64-bit section header; also the unified in-memory form that
 * parse_elf_section_header produces for both 32- and 64-bit files.
 */
struct elf_section_header {
        u32 sh_name;      /* offset into the section-name string table */
        u32 sh_type;
        u64 sh_flags;
        u64 sh_addr;      /* virtual address when loaded */
        u64 sh_offset;    /* file offset of the section data */
        u64 sh_size;
        u32 sh_link;      /* related section index; meaning depends on type */
        u32 sh_info;
        u64 sh_addralign; /* required alignment */
        u64 sh_entsize;   /* entry size for table-like sections, else 0 */
};
/*
 * 32-bit on-disk section header; widened into `struct elf_section_header`
 * by parse_elf_section_header.
 */
struct elf_section_header_32 {
        u32 sh_name;
        u32 sh_type;
        u32 sh_flags;     /* 32-bit in this layout */
        u32 sh_addr;
        u32 sh_offset;
        u32 sh_size;
        u32 sh_link;
        u32 sh_info;
        u32 sh_addralign;
        u32 sh_entsize;
};
/*
 * A fully parsed ELF file: the widened header plus arrays of
 * header.e_phnum program headers and header.e_shnum section headers.
 */
struct elf_file {
        struct elf_header header;
        struct elf_program_header *p_headers; /* array, owned */
        struct elf_section_header *s_headers; /* array, owned */
};
/* Parse the in-memory ELF image at `code`; NULL on failure.
 * The result must be released with elf_free(). */
struct elf_file *elf_parse_file(const char *code);
/* Free an elf_file and the header arrays it owns. */
void elf_free(struct elf_file *elf);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,99 @@
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <malloc.h>
#include "elf.h"
/*
 * Endianness conversion helpers.  They assume the build host is
 * little-endian: the le*_to_cpu macros are identity maps and the
 * be*_to_cpu macros byte-swap -- so on an LE host be64_to_cpu also
 * works as cpu-to-big-endian, which is how main() uses it before
 * writing elf_info out.
 * NOTE: arguments are evaluated multiple times; do not pass expressions
 * with side effects.
 */
#define le16_to_cpu(x) (x)
#define le32_to_cpu(x) (x)
#define le64_to_cpu(x) (x)
#define be16_to_cpu(x) ((((x) & 0xff) << 8) | (((x) >> 8) & 0xff))
#define be32_to_cpu(x) ((be16_to_cpu((x)) << 16) | (be16_to_cpu((x) >> 16)))
#define be64_to_cpu(x) ((be32_to_cpu((x)) << 32) | (be32_to_cpu((x) >> 32)))
#define be128ptr_to_cpu_hi(x) (be64_to_cpu(*(u64 *)(x)))
#define be128ptr_to_cpu_lo(x) (be64_to_cpu(*((u64 *)(x) + 1)))
#define be96ptr_to_cpu_hi(x) (be32_to_cpu(*(u32 *)(x)))
/*
 * BUGFIX: the low 32 bits must come from the *third* word via pointer
 * arithmetic ((u32 *)(x) + 2).  The old code wrote
 * be32_to_cpu(*((u32 *)(x)) + 2), which dereferenced the FIRST word and
 * added 2 to its value.
 */
#define be96ptr_to_cpu_lo(x) (((u64)(be32_to_cpu(*((u32 *)(x) + 1)))) << 32 | \
                              (be32_to_cpu(*((u32 *)(x) + 2))))
/*
 * Summary of the procmgr ELF.  main() byte-swaps every field with
 * be64_to_cpu and writes the raw struct to ./elf_info.temp.  All fields
 * are widened to u64 so the on-disk record has a fixed 48-byte layout.
 * NOTE(review): whatever consumes elf_info.temp must use the identical
 * field order and widths -- confirm against the reader before changing.
 */
struct elf_info {
        u64 mem_size;  /* sum of p_memsz over all PT_LOAD segments */
        u64 entry;     /* e_entry */
        u64 flags;     /* e_flags */
        u64 phentsize; /* e_phentsize */
        u64 phnum;     /* e_phnum */
        u64 phdr_addr; /* p_headers[0].p_vaddr + e_phoff (see get_elf_info) */
};
/**
 * Parse the ELF image in `binary` and fill `info` with the fields the
 * boot tooling needs.  On parse failure a message is printed and *info
 * is left untouched.
 *
 * Fixes vs. the original:
 *  - a stray no-effect statement (`elf->header.e_phentsize;`)
 *    dereferenced `elf` BEFORE the NULL check;
 *  - `free(elf)` leaked p_headers/s_headers; use elf_free() instead;
 *  - p_headers[0] is no longer touched when the file has no program
 *    headers.
 */
void get_elf_info(const char *binary, struct elf_info *info)
{
        struct elf_file *elf;
        int i;
        u64 size = 0;

        elf = elf_parse_file(binary);
        if (!elf) {
                printf("parse elf fail\n");
                return;
        }
        /* Total in-memory footprint: sum the PT_LOAD segment sizes. */
        for (i = 0; i < elf->header.e_phnum; ++i) {
                if (elf->p_headers[i].p_type != PT_LOAD)
                        continue;
                size += elf->p_headers[i].p_memsz;
        }
        info->entry = elf->header.e_entry;
        info->flags = elf->header.e_flags;
        info->mem_size = size;
        info->phentsize = elf->header.e_phentsize;
        info->phnum = elf->header.e_phnum;
        /*
         * NOTE(review): this assumes the program header table lives inside
         * the first segment (p_headers[0]) -- confirm against the boot
         * loader's expectation.
         */
        info->phdr_addr = elf->header.e_phnum ?
                elf->p_headers[0].p_vaddr + elf->header.e_phoff : 0;
        /* elf_free() also releases the header arrays that free() leaked. */
        elf_free(elf);
}
/**
 * Read the procmgr ELF named on the command line, summarize it into a
 * struct elf_info, convert the fields to big-endian and dump the raw
 * struct to ./elf_info.temp.
 *
 * Returns 0 on success, -1 on any argument or I/O error.  The original
 * code printed error messages but kept running with a NULL path or a
 * bad file descriptor.
 */
int main(int argc, char *argv[])
{
        int fd;
        struct stat st;
        char *buf;
        struct elf_info info;

        if (argc == 1) {
                printf("Need a path points to the procmgr.elf\n");
                return -1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                printf("Can not open elf file!\n");
                return -1;
        }
        if (fstat(fd, &st) < 0) {
                printf("Can not stat elf file!\n");
                close(fd);
                return -1;
        }
        buf = malloc(st.st_size);
        if (!buf) {
                printf("Out of memory!\n");
                close(fd);
                return -1;
        }
        if (read(fd, buf, st.st_size) != st.st_size) {
                printf("Can not read elf file!\n");
                free(buf);
                close(fd);
                return -1;
        }
        get_elf_info(buf, &info);
        free(buf);
        close(fd);

        /* be64_to_cpu doubles as cpu-to-big-endian on little-endian hosts. */
        info.entry = be64_to_cpu(info.entry);
        info.flags = be64_to_cpu(info.flags);
        info.mem_size = be64_to_cpu(info.mem_size);
        info.phentsize = be64_to_cpu(info.phentsize);
        info.phnum = be64_to_cpu(info.phnum);
        info.phdr_addr = be64_to_cpu(info.phdr_addr);

        /*
         * O_CREAT requires an explicit mode argument; the old call omitted
         * it, creating the file with undefined permission bits.  O_TRUNC
         * drops stale bytes left by a previous, longer run.
         */
        fd = open("./elf_info.temp", O_CREAT | O_RDWR | O_TRUNC, 0644);
        if (fd < 0) {
                printf("Create file failed!\n");
                return -1;
        }
        if (write(fd, (void *)&info, sizeof(struct elf_info))
            != sizeof(struct elf_info)) {
                printf("Write elf_info failed!\n");
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}

View File

@ -49,18 +49,6 @@ add_custom_command(
DEPENDS)
endif()
# libevent is already installed in docker images
if(CHCORE_DEMOS_LIBEVENT)
add_custom_target(
libevent ALL
WORKING_DIRECTORY ${build_demos_dir}/libevent
COMMAND ./autogen.sh
COMMAND ./configure --disable-openssl --disable-debug-mode --disable-samples -prefix=/usr/libevent
COMMAND CC=${CHCORE_MUSL_LIBC_INSTALL_DIR}/bin/musl-gcc -s &> /dev/null
COMMAND make install
)
endif()
if(CHCORE_DEMOS_MEMCACHED)
add_custom_target(
memcached ALL

View File

@ -21,6 +21,7 @@ SHORT_TERM_TODO
release.h
src/transfer.sh
src/configs
src/redis-test
redis.ds
src/redis.conf
src/nodes.conf

View File

@ -1,4 +1,4 @@
chcore_enable_clang_tidy()
# chcore_enable_clang_tidy()
add_executable(chcore_shell.bin chcore_shell.c job_control.c buildin_cmd.c
handle_input.c)

View File

@ -1,7 +1,7 @@
chcore_enable_clang_tidy(
EXTRA_CHECKS -bugprone-implicit-widening-of-multiplication-result)
# chcore_enable_clang_tidy(
# EXTRA_CHECKS -bugprone-implicit-widening-of-multiplication-result)
add_library(fat32_3rd_party OBJECT ffunicode.c ff.c)
chcore_enable_clang_tidy()
# chcore_enable_clang_tidy()
add_executable(fat32.srv diskio.c ff_server.c ffsystem.c fat32_ops.c
fat32_utils.c)
target_link_libraries(fat32.srv PRIVATE fs_base fat32_3rd_party)

View File

@ -1,4 +1,4 @@
chcore_enable_clang_tidy()
# chcore_enable_clang_tidy()
add_executable(procmgr.srv proc_node.c procmgr.c recycle.c shell_msg_handler.c
srvmgr.c)
target_link_options(procmgr.srv PRIVATE -static)

View File

@ -1,5 +1,5 @@
chcore_enable_clang_tidy(
EXTRA_CHECKS -performance-no-int-to-ptr,-clang-analyzer-deadcode.DeadStores)
# chcore_enable_clang_tidy(
# EXTRA_CHECKS -performance-no-int-to-ptr,-clang-analyzer-deadcode.DeadStores)
message("CMAKE_C_CLANG_TIDY: ${CMAKE_C_CLANG_TIDY}")
set(binary_path ${build_user_dir}/ramdisk.cpio)
configure_file(incbin_ramdisk.tpl.S incbin_ramdisk.S)