commit 033ea779e2911e65f196b4d4f1b8c2cfb00b7e28
parent b8d8ece3f7014296a30a39266a11a84f5e997be4
Author: arjoonn <arjoonn@noreply.localhost>
Date: Wed, 18 Jan 2023 07:39:56 +0000
run_times (!24)
Branch auto created by JayporeCI
```jayporeci
╔ 🟢 : JayporeCI [sha d27e62db8d]
┏━ Docker
┃
┃ 🟢 : JciEnv [281eaca6]
┃ 🟢 : Jci [33e0040a]
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
┏━ Jobs
┃
┃ 🟢 : pylint [e060352d]
┃ 🟢 : black [4651d772]
┃ 🟢 : pytest [d2a15801]
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
┏━ Publish
┃
┃ 🟢 : PublishPypi [17d2e10e]
┃ 🟢 : DockerHubJci [63b4f679]
┃ 🟢 : PublishDocs [d0f70853]
┃ 🟢 : DockerHubJcienv [eb563b04]
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
```
Co-authored-by: arjoonn sharma <arjoonn@midpathsoftware.com>
Reviewed-on: https://gitea.midpathsoftware.com/midpath/jaypore_ci/pulls/24
Diffstat:
8 files changed, 283 insertions(+), 299 deletions(-)
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
@@ -1,206 +0,0 @@
-Examples
-========
-
-This document lists things that you can do using JayporeCI
-
-Cache env dependencies in docker
---------------------------------
-
-You can cache your environment dependencies in docker easily.
-
-.. code-block:: python
-
- from jaypore_ci import jci
-
- with jci.Pipeline() as p:
- image = f"myproject:{p.remote.sha}"
- p.job("Docker", f"docker build -t {image} .")
- p.job(
- "PyTest",
- "python3 -m pytest tests/",
- image=image,
- depends_on=["Docker"]
- )
-
-
-Complex dependencies between jobs
----------------------------------
-
-- A pipeline can have stages.
-- Stages are executed one after the other.
-- Jobs inside a stage are all run in parallel
- - **unless** a job declares what other jobs it `depends_on`.
-
-
-For example, this config builds docker images, runs linting, testing on the
-codebase, then builds and publishes documentation.
-
-
-.. code-block:: python
-
- from jaypore_ci import jci
-
- with jci.Pipeline() as p:
- image = f"myproject_{p.remote.sha}"
-
- with p.stage("build"):
- p.job("DockProd", f"docker build --target ProdEnv -t {image}_prod .")
- p.job("DockDev", f"docker build --target DevEnv -t {image}_dev .")
-
- with p.stage("checking", image=f"{image}_dev"):
- p.job("UnitTest", "python3 -m pytest -m unit tests/")
- p.job("PyLint", "python3 -m pylint src/")
- p.job("Black", "python3 -m black --check .")
- p.job(
- "IntegrationTest",
- "python3 -m pytest -m integration tests/",
- depends_on=["PyLint", "UnitTest"],
- )
- p.job(
- "RegressionTest",
- "python3 -m pytest -m regression tests/",
- depends_on=["PyLint", "UnitTest"],
- )
- p.job(
- "FuzzyTest",
- "python3 -m pytest -m fuzzy tests/",
- depends_on=["PyLint", "UnitTest"],
- )
-
- with p.stage("publish"):
- p.job("TagProd", f"docker tag -t {image}_prod hub/{image}_prod:{p.remote.sha}")
- p.job("TagDev", f"docker tag -t {image}_dev hub/{image}_dev:{p.remote.sha}")
- p.job(
- "PushProd",
- f"docker push hub/{image}_prod:{p.remote.sha}",
- depends_on=["TagProd"],
- )
- p.job(
- "PushDev",
- f"docker push hub/{image}_dev:{p.remote.sha}",
- depends_on=["TagDev"],
- )
- p.job(
- "BuildDocs",
- "sphinx-build -b html docs/source/ docs/build/html",
- image=f"{image}_dev"
- )
-
-
-Matrix jobs
------------
-
-There is no special concept for matrix jobs. Just declare as many jobs as you want. There is a function to help you do this though.
-
-.. code-block:: python
-
- from jaypore_ci import jci
-
- with jci.Pipeline() as p:
- # This will have 18 jobs
- # one for each possible combination of BROWSER, SCREENSIZE, ONLINE
- for env in p.env_matrix(
- BROWSER=["firefox", "chromium", "webkit"],
- SCREENSIZE=["phone", "laptop", "extended"],
- ONLINE=["online", "offline"],
- ):
- p.job(f"Test: {env}", "python3 -m pytest tests", env=env)
-
-The above config generates 3 x 3 x 2 = 18 jobs and sets the environment for each to a unique combination of `BROWSER` , `SCREENSIZE`, and `ONLINE`.
-
-Running on cloud/remote machine
--------------------------------
-
-- Make sure docker is installed on the remote machine.
-- Make sure you have ssh access to remote machine and the user you are logging in as can run docker commands.
-- Add to your local `~.ssh/config` an entry for your remote machine. Something like:
-
- .. code-block:: config
-
- Host my.aws.machine
- HostName some.aws.machine
- IdentityFile ~/.ssh/id_rsa
-- Now in your `cicd/pre-push.sh` file, where the `docker run` command is mentioned, simply add `DOCKER_HOST=ssh://my.aws.machine`
-- JayporeCi will then run on the remote machine.
-
-Having database / other services during CICD
---------------------------------------------
-
-
-.. code-block:: python
-
- from jaypore_ci import jci
-
- # Services immediately return with a PASSED status
- # If they exit with a Non ZERO code they are marked as FAILED, otherwise
- # they are assumed to be PASSED
- with jci.Pipeline() as p:
- with p.stage("Services", is_service=True):
- p.job("Mysql", None, image="mysql")
- p.job("Redis", None, image="redis")
- p.job("Api", "python3 -m src.run_api", image="python:3.11")
- with p.stage("Testing"):
- p.job("UnitTest", "python3 -m pytest -m unit_tests tests")
- p.job("IntegrationTest", "python3 -m pytest -m integration_tests tests")
- p.job("RegressionTest", "python3 -m pytest -m regression_tests tests")
-
-Common jobs for multiple git repos
-----------------------------------
-
-- Sometimes we need to enforce common jobs for multiple git projects. A few examples:
-- A common lint policy for company / clients.
-- Common deploy targets and processes for things like docs / release notes.
-- Common locations for built targets / artifact caches.
-- Common notification targets like slack / telegram / email.
-- Common PR description checklist for company / clients.
-- Common PR merge policies / review policies etc.
-
-Since `JayporeCI` has a normal programming language as it's config language, these things can be solved without too much effort.
-
-1. Create a custom python file and add your common jobs to a function in that
- file. For example if we want to make sure that `Black
- <https://github.com/psf/black>`_ is the code formatter for all your
- projects:
-
- .. code-block:: python
-
- # mycommonjobs.py
- def add_common_lint_jobs(p):
- p.job("black", "python3 -m black --check .")
-
-2. Create your own docker file based on top of `arjoonn/jci:latest` and add your own code to it. For example:
-
- .. code-block:: dockerfile
-
- from arjoonn/jci:latest
- run python -m pip install black
- add mycommonjobs.py .
-
- After this you can build and publish this image to dockerhub. If you don't
- want to publish this image you can simply make sure that it is available on
- the machine that will run your CI.
-
-3. Now in any project you can use this docker image in `cicd/pre-push.sh`
- instead of `arjoonn/jci:latest`. For example if you pushed this image to
- dockerhub with the name `myown/jobs:latest` then you can edit
- your `cicd/pre-push.sh` file to have the docker run command look something
- like this:
-
- .. code-block:: bash
-
- docker run -d \
- # ... Other parameters as it is ...
- myown/jobs:latest \ # Instead of arjoonn/jci:latest
- # ... Other parameters as it is ...
-
-4. Inside `cicd/cicd.py` you can now simply import and call your common code function to add those common jobs:
-
- .. code-block:: python
-
- from jaypore_ci import jci
- from mycommonjobs import add_common_lint_jobs
-
- with jci.Pipeline() as p:
- add_common_lint_jobs(p)
- # ---
- p.job("Test", "pytest -m unit_tests tests")
diff --git a/docs/source/ideas.rst b/docs/source/ideas.rst
@@ -1,34 +0,0 @@
-Ideas
-=====
-
-I'm tired of
-------------
-
-- Spending hours figuring out how to do stuff in YAML configs.
-- Shift to something else when pricing changes.
-- Debugging why stuff is not working only in the CI system.
-- Not being able to run CI without internet.
-- In the case of self CI runners hosted on laptops, I don't want my laptop bogged down by jobs that other people pushed.
-
-
-What I like about existing systems
-----------------------------------
-
-- Use docker to run things.
-- Stateless job runs.
-- Job graphs showing status of the run.
-- We cannot merge PRs unless the pipeline passes.
-- Able to handle multiple languages easily in the same project.
-
-
-Concepts used
--------------
-
-- We use a git hook as a trigger mechanism. There is no possibility that some "CI server" is down.
-- Jobs are run on the machine that pushed the job by default. If you write bad code, your machine suffers first.
-- CI run status is posted directly in the PR description. You don't have to click and reach another website to see what your job is doing.
-- All jobs run inside docker containers.
-- Use `SOPS <https://github.com/mozilla/sops>`_ to manage secrets during CI.
- - `.gitignore` your encryption key files.
- - Commit your secrets in an encrypted form.
- - Update your `pre-push.sh` file to inject the secrets into your CI environment.
diff --git a/docs/source/index.rst b/docs/source/index.rst
@@ -3,55 +3,59 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
-Welcome to Jaypore CI's documentation!
-======================================
+Jaypore CI Documentation
+========================
-**Jaypore CI** is a small system for continuous integration / testing / delivery.
+**Jaypore CI** is a *small*, *very flexible*, and *powerful* system for automation within software projects.
-It is different from the usual suspects like github actions, gitlab CI, drone CI and so on.
-- The configuration language is python.
-- CI runs on your local machine by default.
-- There is no "server". You can run offline.
+TLDR
+----
+- Python is the config language.
+ - No more debugging YAML configs / escape errors.
+ - It's a general purpose programming language! Go wild in how / where / when you want your CI to run.
+- Jobs are run via docker; on your laptop.
+  - No more debugging why stuff only works locally.
+ - You can exec into a running job / see full logs / inspect and even run a debugger on a live job without too much effort.
+- Runs offline so I can work without internet.
+ - If needed CAN run on cloud runners.
-For example, here's a CI pipeline for a project.
-.. code-block:: python
-
- from jaypore_ci import jci
-
- with jci.Pipeline(image='mydockerhub/env_image') as p:
- p.job("Black", "black --check .")
- p.job("Pylint", "pylint mycode/ tests/")
- p.job("PyTest", "pytest tests/", depends_on=["Black", "Pylint"])
+Contents
+--------
+.. contents::
Getting Started
-===============
+========
Installation
------------
-
-To use **Jaypore CI**, first install it using a bash script.
+You can easily install it using a bash script.
.. code-block:: console
$ curl https://get.jayporeci.in | bash
-Doing this will:
+**Or** you can manually install it. These names are convention, you can call your folders/files anything.
-1. Create a directory called `cicd` in the root of your repo.
-2. Create a file `cicd/pre-push.sh`
-3. Create a file `cicd/cicd.py`
-4. Update your repo's pre-push git hook so that it runs the `cicd/pre-push.sh` file when you push.
+1. Create a directory called *cicd* in the root of your repo.
+2. Create a file *cicd/pre-push.sh*
+3. Create a file *cicd/cicd.py*
+4. Update your repo's pre-push git hook so that it runs the *cicd/pre-push.sh* file when you push.
-Basic config
+How it works
------------
+1. Git hook calls `cicd/pre-push.sh`
+2. After doing some administration stuff, `cicd/pre-push.sh` calls `cicd/cicd.py`
+3. As per your config, `cicd/cicd.py` will run your jobs within docker.
+
+
Your entire config is inside `cicd/cicd.py`. Edit it to whatever you like! A basic config would look like this:
.. code-block:: python
@@ -63,22 +67,228 @@ Your entire config is inside `cicd/cicd.py`. Edit it to whatever you like! A bas
p.job("Pylint", "pylint mycode/ tests/")
p.job("PyTest", "pytest tests/")
+Examples
+========
+
+
+Dependencies in docker
+----------------------
-After you make these changes you can `git add -Av` and `git commit -m 'added Jaypore CI'`.
+Environment / package dependencies can be cached in docker easily. Simply build
+your docker image and then run the job with that built image.
-When you do a `git push origin`, that's when the CI system will get triggered and will run the CI.
+.. code-block:: python
-This config will run three jobs in parallel, using the `mydocker/image` docker image.
+ from jaypore_ci import jci
-See :doc:`examples` for more complex examples and :doc:`ideas` for understanding how it works.
+ with jci.Pipeline() as p:
+ p.job("Docker", f"docker build -t myimage .")
+ p.job(
+ "PyTest",
+ "python3 -m pytest tests/",
+ image="myimage",
+ depends_on=["Docker"]
+ )
-Contents
---------
+Complex job relations
+---------------------
+
+- A pipeline can have stages.
+- Stages are executed one after the other.
+- Jobs inside a stage are all run in parallel **unless** a job declares what other jobs it `depends_on`.
+- Keyword arguments can be set at `Pipeline`, `stage`, and `job` level. For
+ example you can set `env` vars / what docker image to use and so on.
+
+
+For example, this config builds docker images, runs linting, testing on the
+codebase, then builds and publishes documentation.
+
+
+.. code-block:: python
+
+ from jaypore_ci import jci
+
+ with jci.Pipeline() as p:
+ image = f"myproject_{p.remote.sha}"
+
+ with p.stage("build"):
+ p.job("DockDev", f"docker build --target DevEnv -t {image}_dev .")
+
+ with p.stage("checking", image=f"{image}_dev"):
+ p.job("UnitTest", "python3 -m pytest -m unit tests/")
+ p.job("PyLint", "python3 -m pylint src/")
+ p.job("Black", "python3 -m black --check .")
+ p.job(
+ "IntegrationTest",
+ "python3 -m pytest -m integration tests/",
+ depends_on=["PyLint", "UnitTest"],
+ )
+ p.job(
+ "RegressionTest",
+ "python3 -m pytest -m regression tests/",
+ depends_on=["PyLint", "UnitTest"],
+ )
+ p.job(
+ "FuzzyTest",
+ "python3 -m pytest -m fuzzy tests/",
+ depends_on=["PyLint", "UnitTest"],
+ )
+
+ with p.stage("publish"):
+ p.job("TagProd", f"docker tag -t {image}_prod hub/{image}_prod:{p.remote.sha}")
+ p.job("TagDev", f"docker tag -t {image}_dev hub/{image}_dev:{p.remote.sha}")
+ p.job(
+ "PushProd",
+ f"docker push hub/{image}_prod:{p.remote.sha}",
+ depends_on=["TagProd"],
+ )
+ p.job(
+ "PushDev",
+ f"docker push hub/{image}_dev:{p.remote.sha}",
+ depends_on=["TagDev"],
+ )
+ p.job(
+ "BuildDocs",
+ "sphinx-build -b html docs/source/ docs/build/html",
+ image=f"{image}_dev"
+ )
+
+
+Job matrix
+----------
+
+There is no special concept for matrix jobs. Just declare as many jobs as you
+want in a loop. There is a function to make this easier when you want to
+run combinations of variables.
+
+.. code-block:: python
+
+ from jaypore_ci import jci
+
+ with jci.Pipeline() as p:
+ # This will have 18 jobs
+ # one for each possible combination of BROWSER, SCREENSIZE, ONLINE
+ for env in p.env_matrix(
+ BROWSER=["firefox", "chromium", "webkit"],
+ SCREENSIZE=["phone", "laptop", "extended"],
+ ONLINE=["online", "offline"],
+ ):
+ p.job(f"Test: {env}", "python3 -m pytest tests", env=env)
+
+The above config generates 3 x 3 x 2 = 18 jobs and sets the environment for each to a unique combination of `BROWSER` , `SCREENSIZE`, and `ONLINE`.
+
+Cloud/remote runners
+--------------------
+
+- Make sure docker is installed on the remote machine.
+- Make sure you have ssh access to remote machine and the user you are logging in as can run docker commands.
+- Add to your local `~/.ssh/config` an entry for your remote machine. Something like:
+
+ .. code-block:: config
+
+ Host my.aws.machine
+ HostName some.aws.machine
+ IdentityFile ~/.ssh/id_rsa
+- Now in your `cicd/pre-push.sh` file, where the `docker run` command is mentioned, simply add `DOCKER_HOST=ssh://my.aws.machine`
+- JayporeCI will then run on the remote machine.
+
+DB Services
+-----------
+
+Some jobs don't affect the status of the pipeline. They just need to be there
+while you are running your tests. For example, you might need a DB to run API
+testing, or you might need both the DB and API as a service to run integration
+testing.
+
+To do this you can add `is_service=True` to the job / stage / pipeline arguments.
+
+Services are only shut down when the pipeline is finished.
+
+
+.. code-block:: python
+
+ from jaypore_ci import jci
+
+ # Services immediately return with a PASSED status
+ # If they exit with a Non ZERO code they are marked as FAILED, otherwise
+ # they are assumed to be PASSED
+ with jci.Pipeline() as p:
+ with p.stage("Services", is_service=True):
+ p.job("Mysql", None, image="mysql")
+ p.job("Redis", None, image="redis")
+ p.job("Api", "python3 -m src.run_api", image="python:3.11")
+ with p.stage("Testing"):
+ p.job("UnitTest", "python3 -m pytest -m unit_tests tests")
+ p.job("IntegrationTest", "python3 -m pytest -m integration_tests tests")
+ p.job("RegressionTest", "python3 -m pytest -m regression_tests tests")
+
+Import jobs with pip install
+----------------------------
+
+You can also import jobs defined by other people. Some examples of why you might want to do this:
+
+- A common lint policy for company / clients.
+- Common deploy targets and processes for things like docs / release notes.
+- Common notification targets like slack / telegram / email.
+- Common PR description checklist for company / clients.
+- Common PR merge policies / review policies etc.
+
+Since `JayporeCI` has a normal programming language as its config language, most things can be solved without too much effort.
+
+
+Artifacts / Cache
+-----------------
+
+- All jobs run in a shared directory `jaypore_ci/run`.
+- Anything you write to this directory is available to all jobs so you can use this to pass artifacts / cache between jobs.
+- You can have a separate job to POST your artifacts to some remote location / git notes / S3 / gitea
+
+Testing your pipelines too!
+---------------------------
+
+Mistakes in the pipeline config can take a long time to catch if you are running a large test harness.
+
+With Jaypore CI it's fairly simple. Just write tests for your pipeline since it's normal Python code!
+
+To help you do this there are mock executors/remotes that you can use instead
+of Docker/Gitea. This example taken from Jaypore CI's own tests shows how you
+would test and make sure that jobs are running in order.
+
+.. code-block:: python
+
+ from jaypore_ci import jci, executors, remotes
+
+ executor = executors.Mock()
+ remote = remotes.Mock(branch="test_branch", sha="fake_sha")
+
+ with jci.Pipeline(executor=executor, remote=remote, poll_interval=0) as p:
+ for name in "pq":
+ p.job(name, name)
+ p.job("x", "x")
+ p.job("y", "y", depends_on=["x"])
+ p.job("z", "z", depends_on=["y"])
+ for name in "ab":
+ p.job(name, name)
+
+ order = p.executor.get_execution_order()
+ # assert order == {}
+ assert order["x"] < order["y"] < order["z"]
+
+
+Contributing
+============
+
+- Main development happens on a self hosted gitea instance.
+- Source code is mirrored at `Github <https://github.com/theSage21/jaypore_ci>`_
+- If you are facing issues please file them on github.
+- If you want to open pull requests please open them on github. I'll try to review and merge them when I get time.
+
+Reference
+=========
.. toctree::
:glob:
- ideas
- examples
reference/modules.rst
+
diff --git a/jaypore_ci/executors/docker.py b/jaypore_ci/executors/docker.py
@@ -1,11 +1,12 @@
"""
A docker executor for Jaypore CI.
"""
+import json
import subprocess
from rich import print as rprint
-from jaypore_ci.interfaces import Executor, TriggerFailed
+from jaypore_ci.interfaces import Executor, TriggerFailed, JobStatus
from jaypore_ci.logging import logger
@@ -172,23 +173,19 @@ class Docker(Executor):
return run_job.stdout.decode().strip()
raise TriggerFailed(run_job)
- def get_status(self, run_id: str) -> (str, str):
+ def get_status(self, run_id: str) -> JobStatus:
"""
Given a run_id, it will get the status for that run.
"""
- ps_out = __check_output__(f"docker ps -f 'id={run_id}' --no-trunc")
- is_running = run_id in ps_out
- # --- exit code
- exit_code = __check_output__(
- f"docker inspect {run_id}" " --format='{{.State.ExitCode}}'"
+ inspect = json.loads(__check_output__(f"docker inspect {run_id}"))[0]
+ status = JobStatus(
+ is_running=inspect["State"]["Running"],
+ exit_code=int(inspect["State"]["ExitCode"]),
+ logs="",
+ started_at=inspect["State"]["StartedAt"],
+ finished_at=inspect["State"]["FinishedAt"],
)
- exit_code = int(exit_code)
# --- logs
+ self.logging().debug("Check status", status=status)
logs = __check_output__(f"docker logs {run_id}")
- self.logging().debug(
- "Check status",
- run_id=run_id,
- is_running=is_running,
- exit_code=exit_code,
- )
- return is_running, exit_code, logs
+ return status._replace(logs=logs)
diff --git a/jaypore_ci/executors/mock.py b/jaypore_ci/executors/mock.py
@@ -3,7 +3,7 @@ A mock executor that actually does not run anything.
"""
import uuid
-from jaypore_ci.interfaces import Executor
+from jaypore_ci.interfaces import Executor, JobStatus
from jaypore_ci.logging import logger
@@ -125,14 +125,14 @@ class Mock(Executor):
)
return run_id
- def get_status(self, run_id: str) -> (str, str):
+ def get_status(self, run_id: str) -> JobStatus:
"""
Given a run_id, it will get the status for that run.
"""
- is_running, exit_code, logs = True, None, ""
+ status = JobStatus(True, None, "", "", "")
if run_id in self.__status__:
- is_running, exit_code, logs = False, 0, ""
- return is_running, exit_code, logs
+ status = JobStatus(False, 0, "fake logs", "", "")
+ return status
def get_execution_order(self):
return {name: i for i, (name, *log) in enumerate(self.__log__)}
diff --git a/jaypore_ci/interfaces.py b/jaypore_ci/interfaces.py
@@ -5,12 +5,21 @@ Currently only gitea and docker are supported as remote and executor
respectively.
"""
from enum import Enum
+from typing import NamedTuple
class TriggerFailed(Exception):
"Failure to trigger a job"
+class JobStatus(NamedTuple):
+ is_running: bool
+ exit_code: int
+ logs: str
+ started_at: str
+ finished_at: str
+
+
class Status(Enum):
"Each pipeline can ONLY be in any one of these statuses"
PENDING = 10
@@ -51,6 +60,12 @@ class Executor:
On exit the executor must clean up any pending / stuck / zombie jobs that are still there.
"""
+ def get_status(self, run_id: str) -> JobStatus:
+ """
+ Returns the status of a given run.
+ """
+ raise NotImplementedError()
+
class Remote:
"""
diff --git a/jaypore_ci/jci.py b/jaypore_ci/jci.py
@@ -133,16 +133,18 @@ class Job: # pylint: disable=too-many-instance-attributes
"""
if isinstance(self.command, str) and self.run_id is not None:
self.logging().debug("Checking job run")
- is_running, exit_code, logs = self.pipeline.executor.get_status(self.run_id)
+ status = self.pipeline.executor.get_status(self.run_id)
self.last_check = pendulum.now(TZ)
self.logging().debug(
- "Job run status found", is_running=is_running, exit_code=exit_code
+ "Job run status found",
+ is_running=status.is_running,
+ exit_code=status.exit_code,
)
- if is_running:
+ if status.is_running:
self.status = Status.RUNNING if not self.is_service else Status.PASSED
else:
- self.status = Status.PASSED if exit_code == 0 else Status.FAILED
- self.logs["stdout"] = reporters.gitea.clean_logs(logs)
+ self.status = Status.PASSED if status.exit_code == 0 else Status.FAILED
+ self.logs["stdout"] = reporters.gitea.clean_logs(status.logs)
if with_update_report:
self.update_report()
diff --git a/pyproject.toml b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "jaypore_ci"
-version = "0.1.8"
+version = "0.1.10"
description = ""
authors = ["arjoonn sharma <arjoonn.94@gmail.com>"]