commit e51b53c2090406140ec6f0bd547399702ca7616b
parent 64fe7a5ee05c16cc490c1b8e5ff15ad84b71c09c
Author: arjoonn <arjoonn@noreply.localhost>
Date: Sat, 25 Mar 2023 04:58:41 +0000
Executor configuration passthrough (!65)
Reviewed-on: https://gitea.midpathsoftware.com/midpath/jaypore_ci/pulls/65
╔ 🟢 : JayporeCI [sha 9332217512]
┏━ build-and-test
┃
┃ 🟢 : JciEnv [f96a45c9] 0:11
┃ 🟢 : Jci [4655d2f3] 0:18 ❮-- ['JciEnv']
┃ 🟢 : black [839c1f8b] 0: 0 ❮-- ['JciEnv']
┃ 🟢 : install-test [bfd667a5] 0: 0 ❮-- ['JciEnv']
┃ 🟢 : pylint [2ca0b979] 0:10 ❮-- ['JciEnv']
┃ 🟢 : pytest [ced905c1] 0:26 Cov: 89% ❮-- ['JciEnv']
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
Diffstat:
5 files changed, 115 insertions(+), 41 deletions(-)
diff --git a/docs/source/examples/extra_hosts.py b/docs/source/examples/extra_hosts.py
@@ -0,0 +1,17 @@
+from jaypore_ci import jci
+
+with jci.Pipeline() as p:
+ p.job(
+ "Pytest",
+ "pytest",
+ executor_kwargs={
+ "extra_hosts": {
+ # Access machines behind VPNs
+ "machine.behind.vpn": "100.64.0.12",
+            # Redirect localhost addresses to the docker gateway
+            "dozzle.localhost": "172.17.0.1",
+ # Replace production APIs with locally mocked APIs
+ "api.myservice.com": "127.0.0.1",
+ }
+ },
+ )
diff --git a/docs/source/index.rst b/docs/source/index.rst
@@ -370,6 +370,23 @@ This is not the recommended way however since it would be a lot easier to make
`cicd/cicd.py` a proper python package instead and put the two configs there
itself.
+Passing extra_hosts and other arguments to docker
+-------------------------------------------------
+
+Oftentimes you want to configure some extra stuff for the docker run command
+that will be used to run your job, like when you want to pass `extra_hosts` or
+`device_requests` to the container.
+
+To do such things you can use the `executor_kwargs` argument while defining the
+job using :meth:`~jaypore_ci.jci.Pipeline.job`. Anything that you pass to
+this dictionary will be handed off to `Docker-py
+<https://docker-py.readthedocs.io/en/stable/containers.html#docker.models.containers.ContainerCollection.run>`_
+and so you can use anything that is mentioned in that documentation.
+
+.. literalinclude:: examples/extra_hosts.py
+ :language: python
+ :linenos:
+
Contributing
============
diff --git a/jaypore_ci/changelog.py b/jaypore_ci/changelog.py
@@ -6,6 +6,17 @@ CHANGE = "⚙️"
BUGFIX = "🐞"
version_map = {
+ V("0.2.30"): {
+ "changes": [
+ (
+ f"{NEW}: You can pass arbitrary arguments to the `docker run` "
+ "command simply by using the `executor_kwargs` argument while "
+ "defining the job. Read more in `Passing extra_hosts and other "
+ "arguments to docker`_."
+ )
+ ],
+ "instructions": [],
+ },
V("0.2.29"): {
"changes": [
(
diff --git a/jaypore_ci/executors/docker.py b/jaypore_ci/executors/docker.py
@@ -1,6 +1,8 @@
"""
A docker executor for Jaypore CI.
"""
+from copy import deepcopy
+
import pendulum
import docker
from rich import print as rprint
@@ -13,9 +15,10 @@ from jaypore_ci.logging import logger
class Docker(Executor):
"""
- Run jobs via docker.
+ Run jobs via docker. To communicate with docker we use the `Python docker
+ sdk <https://docker-py.readthedocs.io/en/stable/client.html>`_.
- This will:
+ Using this executor will:
- Create a separate network for each run
- Run jobs as part of the network
- Clean up all jobs when the pipeline exits.
@@ -139,20 +142,36 @@ class Docker(Executor):
In case something goes wrong it will raise TriggerFailed
"""
assert self.pipe_id is not None, "Cannot run job if pipe id is not set"
+ ex_kwargs = deepcopy(job.executor_kwargs)
+ env = job.get_env()
+ env.update(ex_kwargs.pop("environment", {}))
trigger = {
"detach": True,
- "environment": job.get_env(),
- "volumes": [
- "/var/run/docker.sock:/var/run/docker.sock",
- "/usr/bin/docker:/usr/bin/docker:ro",
- "/tmp/jayporeci__cidfiles:/jaypore_ci/cidfiles:ro",
- f"/tmp/jayporeci__src__{self.pipeline.remote.sha}:/jaypore_ci/run",
- ],
+ "environment": env,
+ "volumes": list(
+ set(
+ [
+ "/var/run/docker.sock:/var/run/docker.sock",
+ "/usr/bin/docker:/usr/bin/docker:ro",
+ "/tmp/jayporeci__cidfiles:/jaypore_ci/cidfiles:ro",
+ f"/tmp/jayporeci__src__{self.pipeline.remote.sha}:/jaypore_ci/run",
+ ]
+ + (ex_kwargs.pop("volumes", []))
+ )
+ ),
"name": self.get_job_name(job),
"network": self.get_net(),
"image": job.image,
"command": job.command if not job.is_service else None,
}
+ for key, value in ex_kwargs.items():
+ if key in trigger:
+ self.logging().warning(
+ f"Overwriting existing value of `{key}` for job trigger.",
+ old_value=trigger[key],
+ new_value=value,
+ )
+ trigger[key] = value
if not job.is_service:
trigger["working_dir"] = "/jaypore_ci/run"
if not job.is_service:
diff --git a/jaypore_ci/jci.py b/jaypore_ci/jci.py
@@ -83,27 +83,37 @@ class Job: # pylint: disable=too-many-instance-attributes
It is never created manually. The correct way to create a job is to use
:meth:`~jaypore_ci.jci.Pipeline.job`.
- :param name: The name for the job. Names must be unique across jobs
- and stages.
- :param command: The command that we need to run for the job. It can be
- set to `None` when `is_service` is True.
- :param is_service: Is this job a service or not? Service jobs are assumed
- to be :class:`~jaypore_ci.interfaces.Status.PASSED` as
- long as they start. They are shut down when the entire
- pipeline has finished executing.
- :param pipeline: The pipeline this job is associated with.
- :param status: The :class:`~jaypore_ci.interfaces.Status` of this job.
- :param image: What docker image to use for this job.
- :param timeout: Defines how long a job is allowed to run before being
- killed and marked as
- class:`~jaypore_ci.interfaces.Status.FAILED`.
- :param env: A dictionary of environment variables to pass to the
- docker run command.
- :param children: Defines which jobs depend on this job's output status.
- :param parents: Defines which jobs need to pass before this job can be
- run.
- :param stage: What stage the job belongs to. This stage name must
- exist so that we can assign jobs to it.
+ :param name: The name for the job. Names must be unique across
+ jobs and stages.
+ :param command: The command that we need to run for the job. It can
+ be set to `None` when `is_service` is True.
+ :param is_service: Is this job a service or not? Service jobs are
+ assumed to be
+ :class:`~jaypore_ci.interfaces.Status.PASSED` as
+ long as they start. They are shut down when the
+ entire pipeline has finished executing.
+ :param pipeline: The pipeline this job is associated with.
+ :param status: The :class:`~jaypore_ci.interfaces.Status` of this job.
+ :param image: What docker image to use for this job.
+ :param timeout: Defines how long a job is allowed to run before being
+ killed and marked as
+                       :class:`~jaypore_ci.interfaces.Status.FAILED`.
+ :param env: A dictionary of environment variables to pass to
+ the docker run command.
+ :param children: Defines which jobs depend on this job's output
+ status.
+ :param parents: Defines which jobs need to pass before this job can
+ be run.
+ :param stage: What stage the job belongs to. This stage name must
+ exist so that we can assign jobs to it.
+ :param executor_kwargs: A dictionary of keyword arguments that the executor
+ can use when running a job. Different executors may
+ use this in different ways, for example with the
+ :class:`~jaypore_ci.executors.docker.Docker`
+ executor this may be used to run jobs with
+ `--add-host or --device
+ <https://docker-py.readthedocs.io/en/stable/containers.html#docker.models.containers.ContainerCollection.run>`_
+ .
"""
def __init__(
@@ -113,13 +123,15 @@ class Job: # pylint: disable=too-many-instance-attributes
pipeline: "Pipeline",
*,
status: str = None,
- image: str = None,
- timeout: int = None,
- env: dict = None,
children: List["Job"] = None,
parents: List["Job"] = None,
is_service: bool = False,
stage: str = None,
+ # --- executor kwargs
+ image: str = None,
+ timeout: int = None,
+ env: dict = None,
+ executor_kwargs: dict = None,
):
self.name = name
self.command = command
@@ -133,6 +145,7 @@ class Job: # pylint: disable=too-many-instance-attributes
self.parents = parents if parents is not None else []
self.is_service = is_service
self.stage = stage
+ self.executor_kwargs = executor_kwargs if executor_kwargs is not None else {}
# --- run information
self.logs = defaultdict(list)
self.job_id = id(self)
@@ -239,15 +252,12 @@ class Job: # pylint: disable=too-many-instance-attributes
2. Stage
3. Job
"""
- return {
- **{
- k[len(PREFIX) :]: v
- for k, v in os.environ.items()
- if k.startswith(PREFIX)
- },
- **self.pipeline.pipe_kwargs.get("env", {}),
- **self.env,
+ env = {
+ k[len(PREFIX) :]: v for k, v in os.environ.items() if k.startswith(PREFIX)
}
+ env.update(self.pipeline.pipe_kwargs.get("env", {}))
+ env.update(self.env) # Includes env specified in stage kwargs AND job kwargs
+ return env
class Pipeline: # pylint: disable=too-many-instance-attributes